diff --git a/libcxx/include/__config b/libcxx/include/__config
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1226,7 +1226,9 @@
 #if __has_feature(cxx_atomic) || __has_extension(c_atomic) || __has_keyword(_Atomic)
 #  define _LIBCPP_HAS_C_ATOMIC_IMP
-#elif defined(_LIBCPP_COMPILER_GCC)
+#endif
+
+#if defined(_LIBCPP_COMPILER_GCC) || (__has_builtin(__atomic_load) && __has_builtin(__atomic_store) && __has_builtin(__atomic_exchange) && __has_builtin(__atomic_compare_exchange))
 #  define _LIBCPP_HAS_GCC_ATOMIC_IMP
 #endif

diff --git a/libcxx/include/atomic b/libcxx/include/atomic
--- a/libcxx/include/atomic
+++ b/libcxx/include/atomic
@@ -108,6 +108,31 @@
     void notify_all() noexcept;
 };

+template <class T>
+struct atomic_ref // since C++20
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    explicit atomic_ref(T& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    T operator=(T desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    bool is_lock_free() const noexcept;
+    void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+    T load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator T() const noexcept;
+    T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(T& expc, T desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(T& expc, T desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(T& expc, T desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+};
+
 template <>
 struct atomic<integral>
 {
@@ -115,15 +140,10 @@
     using difference_type = value_type;

     static constexpr bool is_always_lock_free;
+    bool is_lock_free() const volatile noexcept;
     bool is_lock_free() const noexcept;

-    atomic() noexcept = default;
-    constexpr atomic(integral desr) noexcept;
-    atomic(const atomic&) = delete;
-    atomic& operator=(const atomic&) = delete;
-    atomic& operator=(const atomic&) volatile = delete;
-
     integral load(memory_order m = memory_order_seq_cst) const volatile noexcept;
     integral load(memory_order m = memory_order_seq_cst) const noexcept;
     operator integral() const volatile noexcept;
@@ -164,6 +184,14 @@
     integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) volatile noexcept;
     integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) noexcept;

+    atomic() noexcept = default;
+    constexpr atomic(integral desr) noexcept;
+    atomic(const atomic&) = delete;
+    atomic& operator=(const atomic&) = delete;
+    atomic& operator=(const atomic&) volatile = delete;
+    integral operator=(integral desr) volatile noexcept;
+    integral operator=(integral desr) noexcept;
+
     integral operator++(int) volatile noexcept;
     integral operator++(int) noexcept;
     integral operator--(int) volatile noexcept;
     integral operator--(int) noexcept;
@@ -191,6 +219,47 @@
     void notify_all() noexcept;
 };

+template <>
+struct atomic_ref<integral> // since C++20
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    explicit atomic_ref(integral& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    integral operator=(integral desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    bool is_lock_free() const noexcept;
+    void store(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    integral load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator integral() const noexcept;
+    integral exchange(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    integral fetch_add(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_and(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_or(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    integral operator++(int) const noexcept;
+    integral operator--(int) const noexcept;
+    integral operator++() const noexcept;
+    integral operator--() const noexcept;
+    integral operator+=(integral op) const noexcept;
+    integral operator-=(integral op) const noexcept;
+    integral operator&=(integral op) const noexcept;
+    integral operator|=(integral op) const noexcept;
+    integral operator^=(integral op) const noexcept;
+};
+
 template <class T>
 struct atomic<T*>
 {
@@ -260,6 +329,40 @@
     void notify_all() noexcept;
 };

+template <class T>
+struct atomic_ref<T*> // since C++20
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    explicit atomic_ref(T*& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    T* operator=(T* desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    bool is_lock_free() const noexcept;
+    void store(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    T* load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator T*() const noexcept;
+    T* exchange(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+    T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    T* operator++(int) const noexcept;
+    T* operator--(int) const noexcept;
+    T* operator++() const noexcept;
+    T* operator--() const noexcept;
+    T* operator+=(ptrdiff_t op) const noexcept;
+    T* operator-=(ptrdiff_t op) const noexcept;
+};
+
 template <class T>
 bool atomic_is_lock_free(const volatile atomic<T>* obj) noexcept;
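For context while reviewing the synopsis above, a minimal usage sketch of the interface being added (illustrative only, not part of the patch; assumes a C++20 toolchain whose <atomic> provides atomic_ref):

    #include <atomic>
    #include <cassert>

    int main() {
      int plain = 0;                    // an ordinary, non-atomic object
      std::atomic_ref<int> ref(plain);  // atomic view over it for ref's lifetime
      ref.store(41);
      ref.fetch_add(1);
      assert(ref.load() == 42);
      assert(plain == 42);              // effects are visible through the object itself
      return 0;
    }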
@@ -704,9 +807,24 @@
 #endif // _LIBCPP_CXX03_LANG
   _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
     : __a_value(value) {}
+  using __contained_t = _Tp;
   _Tp __a_value;
 };

+template <class _Tp, template <class> class _TemplateTp>
+struct __is_instantiation_of : false_type { };
+
+template <class _Tp, template <class> class _TemplateTp>
+struct __is_instantiation_of<_TemplateTp<_Tp>, _TemplateTp> : true_type {};
+
+template <class _Tp,
+          typename = typename enable_if<__is_instantiation_of<typename remove_volatile<_Tp>::type, __cxx_atomic_base_impl>::value, bool>::type>
+struct __cxx_atomic_base_impl_traits {
+  static constexpr bool __is_value_volatile = is_volatile<_Tp>::value;
+  static constexpr bool __is_value_ref = is_reference<typename _Tp::__contained_t>::value;
+  using __underlying_t = typename remove_volatile<typename remove_reference<typename _Tp::__contained_t>::type>::type;
+  static constexpr bool __is_value_pointer = is_pointer<__underlying_t>::value;
+};
+
 _LIBCPP_INLINE_VISIBILITY inline _LIBCPP_CONSTEXPR int __to_gcc_order(memory_order __order) {
   // Avoid switch statement to make this a constexpr.
   return __order == memory_order_relaxed ? __ATOMIC_RELAXED:
@@ -727,15 +845,15 @@
          __ATOMIC_CONSUME))));
 }

-template <typename _Tp>
+template <typename _Tp, typename enable_if<__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+void __cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
   __cxx_atomic_assign_volatile(__a->__a_value, __val);
 }

-template <typename _Tp>
+template <typename _Tp, typename enable_if<!__cxx_atomic_base_impl_traits<_Tp>::__is_value_volatile, bool>::type = 0>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val) {
+void __cxx_atomic_init(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val) {
   __a->__a_value = __val;
 }

@@ -751,25 +869,17 @@

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
-                        memory_order __order) {
+void __cxx_atomic_store(_Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __val, memory_order __order) {
   __atomic_store(&__a->__a_value, &__val, __to_gcc_order(__order));
 }

-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp>* __a, _Tp __val,
-                        memory_order __order) {
-  __atomic_store(&__a->__a_value, &__val,
-                 __to_gcc_order(__order));
-}
-
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(const volatile __cxx_atomic_base_impl<_Tp>* __a,
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_load(const _Tp* __a,
                   memory_order __order) {
-  _Tp __ret;
+  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
   __atomic_load(&__a->__a_value, &__ret,
                 __to_gcc_order(__order));
   return __ret;
@@ -777,28 +887,11 @@
 }

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(const __cxx_atomic_base_impl<_Tp>* __a, memory_order __order) {
-  _Tp __ret;
-  __atomic_load(&__a->__a_value, &__ret,
-                __to_gcc_order(__order));
-  return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                          _Tp __value, memory_order __order) {
-  _Tp __ret;
-  __atomic_exchange(&__a->__a_value, &__value, &__ret,
-                    __to_gcc_order(__order));
-  return __ret;
-}
-
-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp>* __a, _Tp __value,
-                          memory_order __order) {
-  _Tp __ret;
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_exchange(_Tp* __a,
+                      typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
+                      memory_order __order) {
+  typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __ret;
   __atomic_exchange(&__a->__a_value, &__value, &__ret,
                     __to_gcc_order(__order));
   return __ret;
 }

@@ -807,7 +900,8 @@
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
 bool __cxx_atomic_compare_exchange_strong(
-    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
     memory_order __success, memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    false,
@@ -815,21 +909,11 @@
                                    __to_gcc_order(__success),
                                    __to_gcc_failure_order(__failure));
 }

-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_strong(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
-    memory_order __failure) {
-  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
-                                   false,
-                                   __to_gcc_order(__success),
-                                   __to_gcc_failure_order(__failure));
-}
-
 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
 bool __cxx_atomic_compare_exchange_weak(
-    volatile __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value,
+    _Tp* __a, typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t* __expected,
+    typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __value,
     memory_order __success, memory_order __failure) {
   return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
                                    true,
@@ -837,17 +921,6 @@
                                    __to_gcc_failure_order(__failure));
 }

-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_weak(
-    __cxx_atomic_base_impl<_Tp>* __a, _Tp* __expected, _Tp __value, memory_order __success,
-    memory_order __failure) {
-  return __atomic_compare_exchange(&__a->__a_value, __expected, &__value,
-                                   true,
-                                   __to_gcc_order(__success),
-                                   __to_gcc_failure_order(__failure));
-}
-
 template <typename _Tp>
 struct __skip_amt { enum {value = 1}; };

@@ -863,259 +936,55 @@
 template <typename _Tp, typename _Td>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                           _Td __delta, memory_order __order) {
-  return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
-                           memory_order __order) {
-  return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_add(_Tp* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_add(&__a->__a_value, __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
                             __to_gcc_order(__order));
 }

 template <typename _Tp, typename _Td>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                           _Td __delta, memory_order __order) {
-  return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
-                            __to_gcc_order(__order));
-}
-
-template <typename _Tp, typename _Td>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp>* __a, _Td __delta,
-                           memory_order __order) {
-  return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<_Tp>::value,
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_sub(_Tp* __a, _Td __delta, memory_order __order) {
+  return __atomic_fetch_sub(&__a->__a_value, __delta * __skip_amt<typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t>::value,
                             __to_gcc_order(__order));
 }

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                           _Tp __pattern, memory_order __order) {
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_and(_Tp* __a,
+                       typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern,
+                       memory_order __order) {
   return __atomic_fetch_and(&__a->__a_value, __pattern,
                             __to_gcc_order(__order));
 }

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp>* __a,
-                           _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_and(&__a->__a_value, __pattern,
-                            __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                          _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_or(&__a->__a_value, __pattern,
-                           __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
-                          memory_order __order) {
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_or(_Tp* __a,
+                      typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern,
+                      memory_order __order) {
   return __atomic_fetch_or(&__a->__a_value, __pattern,
                            __to_gcc_order(__order));
 }

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(volatile __cxx_atomic_base_impl<_Tp>* __a,
-                           _Tp __pattern, memory_order __order) {
-  return __atomic_fetch_xor(&__a->__a_value, __pattern,
-                            __to_gcc_order(__order));
-}
-
-template <typename _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp>* __a, _Tp __pattern,
-                           memory_order __order) {
+typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t
+__cxx_atomic_fetch_xor(_Tp* __a,
+                       typename __cxx_atomic_base_impl_traits<_Tp>::__underlying_t __pattern,
+                       memory_order __order) {
   return __atomic_fetch_xor(&__a->__a_value, __pattern,
                             __to_gcc_order(__order));
 }

 #define __cxx_atomic_is_lock_free(__s) __atomic_is_lock_free(__s, 0)

-#elif defined(_LIBCPP_HAS_C_ATOMIC_IMP)
-
-template<class _Tp>
-struct __cxx_atomic_base_impl {
-
-  _LIBCPP_INLINE_VISIBILITY
-#ifndef _LIBCPP_CXX03_LANG
-    __cxx_atomic_base_impl() _NOEXCEPT = default;
-#else
-    __cxx_atomic_base_impl() _NOEXCEPT : __a_value() {}
-#endif // _LIBCPP_CXX03_LANG
-  _LIBCPP_CONSTEXPR explicit __cxx_atomic_base_impl(_Tp value) _NOEXCEPT
-    : __a_value(value) {}
-  _LIBCPP_DISABLE_EXTENSION_WARNING _Atomic(_Tp) __a_value;
-};
-
-#define __cxx_atomic_is_lock_free(__s) __c11_atomic_is_lock_free(__s)
-
-_LIBCPP_INLINE_VISIBILITY inline
-void __cxx_atomic_thread_fence(memory_order __order) _NOEXCEPT {
-  __c11_atomic_thread_fence(static_cast<__memory_order_underlying_t>(__order));
-}
-
-_LIBCPP_INLINE_VISIBILITY inline
-void __cxx_atomic_signal_fence(memory_order __order) _NOEXCEPT {
-  __c11_atomic_signal_fence(static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val) _NOEXCEPT {
-  __c11_atomic_init(&__a->__a_value, __val);
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_init(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val) _NOEXCEPT {
-  __c11_atomic_init(&__a->__a_value, __val);
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __val, memory_order __order) _NOEXCEPT {
-  __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-void __cxx_atomic_store(__cxx_atomic_base_impl<_Tp> * __a, _Tp __val, memory_order __order) _NOEXCEPT {
-  __c11_atomic_store(&__a->__a_value, __val, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const volatile* __a, memory_order __order) _NOEXCEPT {
-  using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
-  return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_load(__cxx_atomic_base_impl<_Tp> const* __a, memory_order __order) _NOEXCEPT {
-  using __ptr_type = typename remove_const<decltype(__a->__a_value)>::type*;
-  return __c11_atomic_load(const_cast<__ptr_type>(&__a->__a_value), static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __value, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_exchange(__cxx_atomic_base_impl<_Tp> * __a, _Tp __value, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_exchange(&__a->__a_value, __value, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_strong(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_strong(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-bool __cxx_atomic_compare_exchange_weak(__cxx_atomic_base_impl<_Tp> * __a, _Tp* __expected, _Tp __value, memory_order __success, memory_order __failure) _NOEXCEPT {
-  return __c11_atomic_compare_exchange_weak(&__a->__a_value, __expected, __value, static_cast<__memory_order_underlying_t>(__success), static_cast<__memory_order_underlying_t>(__failure));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_add(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_add(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp> * __a, _Tp __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> volatile* __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp* __cxx_atomic_fetch_sub(__cxx_atomic_base_impl<_Tp*> * __a, ptrdiff_t __delta, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_sub(&__a->__a_value, __delta, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_and(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_and(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_or(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_or(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> volatile* __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-template<class _Tp>
-_LIBCPP_INLINE_VISIBILITY
-_Tp __cxx_atomic_fetch_xor(__cxx_atomic_base_impl<_Tp> * __a, _Tp __pattern, memory_order __order) _NOEXCEPT {
-  return __c11_atomic_fetch_xor(&__a->__a_value, __pattern, static_cast<__memory_order_underlying_t>(__order));
-}
-
-#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP, _LIBCPP_HAS_C_ATOMIC_IMP
+#endif // _LIBCPP_HAS_GCC_ATOMIC_IMP

 template <class _Tp>
 _LIBCPP_INLINE_VISIBILITY
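The overload merging above hinges on dispatching one function template over both the volatile and non-volatile impl types. A self-contained sketch of that technique, with hypothetical names (not the patch's actual code):

    #include <type_traits>

    template <class T> struct impl { T value; };

    template <class A>
    struct traits {
      static constexpr bool is_volatile = std::is_volatile<A>::value;
    };

    // One signature now serves what used to be two overloads: enable_if on
    // the traits class selects the volatile or non-volatile instantiation.
    template <class A, typename std::enable_if<traits<A>::is_volatile, bool>::type = 0>
    void init(A* a, int v) { a->value = v; }  // volatile path

    template <class A, typename std::enable_if<!traits<A>::is_volatile, bool>::type = 0>
    void init(A* a, int v) { a->value = v; }  // non-volatile path

    int main() {
      impl<int> x;
      volatile impl<int> y;
      init(&x, 1);  // picks the non-volatile overload
      init(&y, 2);  // picks the volatile overload
      return 0;
    }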
@@ -1378,6 +1247,7 @@
   __a->__unlock();
   return __old;
 }
+// TODO specialize for pointer fetch_sub

 template <typename _Tp>
 _LIBCPP_INLINE_VISIBILITY
@@ -1486,12 +1356,7 @@
           typename _Base = __cxx_atomic_base_impl<_Tp> >
 #endif //_LIBCPP_ATOMIC_ONLY_USE_BUILTINS
 struct __cxx_atomic_impl : public _Base {
-
-#if _GNUC_VER >= 501
-  static_assert(is_trivially_copyable<_Tp>::value,
-    "std::atomic<Tp> requires that 'Tp' be a trivially copyable type");
-#endif
-
+  using __base = _Base;
   _LIBCPP_INLINE_VISIBILITY __cxx_atomic_impl() _NOEXCEPT _LIBCPP_DEFAULT
   _LIBCPP_INLINE_VISIBILITY _LIBCPP_CONSTEXPR explicit __cxx_atomic_impl(_Tp value) _NOEXCEPT
     : _Base(value) {}
@@ -1595,6 +1460,12 @@
     static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_), 0);
 #endif

+// TODO perform this check when compiled with clang too.
+#if _GNUC_VER >= 501
+    static_assert(is_trivially_copyable<_Tp>::value,
+        "std::atomic<Tp> requires that 'Tp' be a trivially copyable type");
+#endif
+
     _LIBCPP_INLINE_VISIBILITY
     bool is_lock_free() const volatile _NOEXCEPT
         {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
@@ -1698,9 +1569,83 @@
 #endif
 };

+// general atomic_ref
+
+template <class _Tp, bool = is_integral<_Tp>::value && !is_same<_Tp, bool>::value>
+struct __atomic_ref_base // false
+{
+    mutable __cxx_atomic_impl<_Tp&> __a_;
+
+#if defined(__cpp_lib_atomic_is_always_lock_free)
+    static _LIBCPP_CONSTEXPR bool is_always_lock_free = __atomic_always_lock_free(sizeof(__a_.__a_value), 0);
+#endif
+
+#if defined(_LIBCPP_ATOMIC_ONLY_USE_BUILTINS)
+    static_assert(__cxx_is_always_lock_free<_Tp>::__value,
+        "std::atomic_ref<_Tp> requires _Tp to always be lock free when built using builtin only mode");
+#endif
+
+    static_assert(is_trivially_copyable<_Tp>::value,
+        "std::atomic_ref<_Tp> requires that '_Tp' be a trivially copyable type");
+
+    static _LIBCPP_CONSTEXPR size_t required_alignment = alignment_of<decltype(__cxx_atomic_impl<_Tp&>::__a_value)>::value;
+
+    _LIBCPP_INLINE_VISIBILITY
+    bool is_lock_free() const _NOEXCEPT
+        {return __cxx_atomic_is_lock_free(sizeof(_Tp));}
+    _LIBCPP_INLINE_VISIBILITY
+    void store(_Tp __d, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        _LIBCPP_CHECK_STORE_MEMORY_ORDER(__m)
+        {__cxx_atomic_store(&__a_, __d, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp load(memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        _LIBCPP_CHECK_LOAD_MEMORY_ORDER(__m)
+        {return __cxx_atomic_load(&__a_, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    operator _Tp() const _NOEXCEPT {return load();}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp exchange(_Tp __d, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_exchange(&__a_, __d, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    bool compare_exchange_weak(_Tp& __e, _Tp __d,
+                               memory_order __s, memory_order __f) const _NOEXCEPT
+        _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __s, __f);}
+    _LIBCPP_INLINE_VISIBILITY
+    bool compare_exchange_strong(_Tp& __e, _Tp __d,
+                                 memory_order __s, memory_order __f) const _NOEXCEPT
+        _LIBCPP_CHECK_EXCHANGE_MEMORY_ORDER(__s, __f)
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __s, __f);}
+    _LIBCPP_INLINE_VISIBILITY
+    bool compare_exchange_weak(_Tp& __e, _Tp __d,
+                               memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_compare_exchange_weak(&__a_, &__e, __d, __m, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    bool compare_exchange_strong(_Tp& __e, _Tp __d,
+                                 memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_compare_exchange_strong(&__a_, &__e, __d, __m, __m);}
+
+    _LIBCPP_INLINE_VISIBILITY _LIBCPP_EXPLICIT
+    __atomic_ref_base(_Tp& __d) _NOEXCEPT : __a_(__d) {}
+    _LIBCPP_INLINE_VISIBILITY
+    __atomic_ref_base(const __atomic_ref_base& other) _NOEXCEPT
+        : __a_(other.__a_) {}
+
+    _Tp operator=(const _Tp __d) const _NOEXCEPT {
+        store(__d);
+        return __d;
+    }
+#ifndef _LIBCPP_CXX03_LANG
+    __atomic_ref_base& operator=(const __atomic_ref_base&) = delete;
+#else
+private:
+    __atomic_ref_base& operator=(const __atomic_ref_base&);
+#endif
+};
+
 #if defined(__cpp_lib_atomic_is_always_lock_free)
 template <class _Tp, bool __b>
 _LIBCPP_CONSTEXPR bool __atomic_base<_Tp, __b>::is_always_lock_free;
+template <class _Tp, bool __b>
+_LIBCPP_CONSTEXPR bool __atomic_ref_base<_Tp, __b>::is_always_lock_free;
 #endif

 // atomic<Integral>
@@ -1784,6 +1729,60 @@
     _Tp operator^=(_Tp __op) _NOEXCEPT {return fetch_xor(__op) ^ __op;}
 };

+// atomic_ref<Integral>
+
+template <class _Tp>
+struct __atomic_ref_base<_Tp, true>
+    : public __atomic_ref_base<_Tp, false>
+{
+    typedef __atomic_ref_base<_Tp, false> __base;
+
+    _LIBCPP_INLINE_VISIBILITY _LIBCPP_EXPLICIT
+    __atomic_ref_base(_Tp& __d) _NOEXCEPT : __base(__d) {}
+    _LIBCPP_INLINE_VISIBILITY
+    __atomic_ref_base(const __atomic_ref_base& other) _NOEXCEPT
+        : __base(other) {}
+
+    _Tp operator=(const _Tp __d) const _NOEXCEPT {
+        return __base::operator=(__d);
+    }
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp fetch_add(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp fetch_sub(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp fetch_and(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_and(&this->__a_, __op, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp fetch_or(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_or(&this->__a_, __op, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp fetch_xor(_Tp __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_xor(&this->__a_, __op, __m);}
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator++(int) const _NOEXCEPT {return fetch_add(_Tp(1));}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator--(int) const _NOEXCEPT {return fetch_sub(_Tp(1));}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator++() const _NOEXCEPT {return fetch_add(_Tp(1)) + _Tp(1);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator--() const _NOEXCEPT {return fetch_sub(_Tp(1)) - _Tp(1);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator+=(_Tp __op) const _NOEXCEPT {return fetch_add(__op) + __op;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator-=(_Tp __op) const _NOEXCEPT {return fetch_sub(__op) - __op;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator&=(_Tp __op) const _NOEXCEPT {return fetch_and(__op) & __op;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator|=(_Tp __op) const _NOEXCEPT {return fetch_or(__op) | __op;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator^=(_Tp __op) const _NOEXCEPT {return fetch_xor(__op) ^ __op;}
+};
+
 // atomic

 template <class _Tp>
@@ -1806,6 +1805,23 @@
         {__base::store(__d); return __d;}
 };

+// atomic_ref
+
+template <class _Tp>
+struct atomic_ref
+    : public __atomic_ref_base<_Tp>
+{
+    typedef __atomic_ref_base<_Tp> __base;
+    _LIBCPP_INLINE_VISIBILITY
+    _LIBCPP_EXPLICIT atomic_ref(_Tp& obj) _NOEXCEPT : __base(obj) {}
+    _LIBCPP_INLINE_VISIBILITY
+    atomic_ref(const atomic_ref& ref) _NOEXCEPT : __base(ref) {}
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp operator=(const _Tp __d) const _NOEXCEPT {
+        return __base::operator=(__d);
+    }
+};
+
 // atomic<T*>

 template <class _Tp>
@@ -1868,6 +1884,44 @@
     _Tp* operator-=(ptrdiff_t __op) _NOEXCEPT {return fetch_sub(__op) - __op;}
 };

+// atomic_ref<T*>
+
+template <class _Tp>
+struct atomic_ref<_Tp*>
+    : public __atomic_ref_base<_Tp*>
+{
+    typedef __atomic_ref_base<_Tp*> __base;
+    _LIBCPP_INLINE_VISIBILITY
+    _LIBCPP_EXPLICIT atomic_ref(_Tp*& obj) _NOEXCEPT : __base(obj) {}
+    _LIBCPP_INLINE_VISIBILITY
+    atomic_ref(const atomic_ref& ref) _NOEXCEPT : __base(ref) {}
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator=(_Tp* __d) const _NOEXCEPT {
+        return __base::operator=(__d);
+    }
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* fetch_add(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_add(&this->__a_, __op, __m);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* fetch_sub(ptrdiff_t __op, memory_order __m = memory_order_seq_cst) const _NOEXCEPT
+        {return __cxx_atomic_fetch_sub(&this->__a_, __op, __m);}
+
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator++(int) const _NOEXCEPT {return fetch_add(1);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator--(int) const _NOEXCEPT {return fetch_sub(1);}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator++() const _NOEXCEPT {return fetch_add(1) + 1;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator--() const _NOEXCEPT {return fetch_sub(1) - 1;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator+=(ptrdiff_t __op) const _NOEXCEPT {return fetch_add(__op) + __op;}
+    _LIBCPP_INLINE_VISIBILITY
+    _Tp* operator-=(ptrdiff_t __op) const _NOEXCEPT {return fetch_sub(__op) - __op;}
+};
+
 // atomic_is_lock_free

 template <class _Tp>
diff --git a/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp b/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp
--- a/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.lockfree/isalwayslockfree.pass.cpp
@@ -22,8 +22,13 @@
 #endif

 template <class T>
 void checkAlwaysLockFree() {
-  if (std::atomic<T>::is_always_lock_free)
+  assert(std::atomic<T>::is_always_lock_free ==
+         std::atomic_ref<T>::is_always_lock_free);
+  if (std::atomic<T>::is_always_lock_free) {
     assert(std::atomic<T>().is_lock_free());
+    T val;
+    assert(std::atomic_ref<T>(val).is_lock_free());
+  }
 }

 // FIXME: This separate test is needed to work around llvm.org/PR31864
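Both is_always_lock_free paths exercised above bottom out in the same compiler builtin; a standalone illustration (GCC/Clang builtin, shown under the assumption the compiler provides it):

    #include <cstdio>

    int main() {
      // __atomic_always_lock_free(size, ptr) folds to a compile-time constant;
      // a null pointer argument asks about objects of typical alignment.
      constexpr bool int_lock_free = __atomic_always_lock_free(sizeof(int), 0);
      std::printf("int is always lock free: %d\n", int_lock_free);
      return 0;
    }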
diff --git a/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp
--- a/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/address.pass.cpp
@@ -75,6 +75,7 @@
 #include <cassert>

 #include "test_macros.h"
+#include "test_compare_exchange_helpers.h"

 template <class A, class T>
 void
@@ -130,15 +131,125 @@
 }

 template <class A, class T>
-void test()
+void test() {
+  do_test<A, T>();
+  do_test<volatile A, T>();
+}
+
+/*
+template <class T>
+struct atomic_ref<T*>
 {
-    do_test<A, T>();
-    do_test<volatile A, T>();
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    explicit atomic_ref(T*& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    T* operator=(T* desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    bool is_lock_free() const noexcept;
+    void store(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    T* load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator T*() const noexcept;
+    T* exchange(T* desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(T*& expc, T* desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(T*& expc, T* desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    T* fetch_add(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+    T* fetch_sub(ptrdiff_t op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    T* operator++(int) const noexcept;
+    T* operator--(int) const noexcept;
+    T* operator++() const noexcept;
+    T* operator--() const noexcept;
+    T* operator+=(ptrdiff_t op) const noexcept;
+    T* operator-=(ptrdiff_t op) const noexcept;
+};
+*/
+
+template <class T>
+void test_atomic_ref() {
+  using atomic_ref_t = std::atomic_ref<T>;
+  assert(atomic_ref_t::is_always_lock_free);
+  assert(atomic_ref_t::required_alignment >= sizeof(T));
+  assert(atomic_ref_t::required_alignment % sizeof(T) == 0);
+  constexpr T base_pointer = nullptr;
+  T underlying = base_pointer;
+  const atomic_ref_t test_ref(underlying);
+  assert(test_ref.is_lock_free());
+
+  test_ref.store(base_pointer + 1);
+  assert(test_ref.load() == base_pointer + 1);
+  test_ref.store(base_pointer + 2, std::memory_order_relaxed);
+  assert(test_ref.load(std::memory_order_relaxed) == base_pointer + 2);
+
+  test_ref.store(base_pointer + 3);
+  assert(static_cast<T>(test_ref) == base_pointer + 3);
+
+  assert(test_ref.exchange(base_pointer + 4) == base_pointer + 3);
+  assert(test_ref.exchange(base_pointer + 5, std::memory_order_relaxed) ==
+         base_pointer + 4);
+  assert(test_ref.load() == base_pointer + 5);
+
+  test_compare_exchange_weak(test_ref, base_pointer);
+  test_compare_exchange_weak<T, std::memory_order_seq_cst>(test_ref,
+                                                           base_pointer);
+  test_compare_exchange_weak<T, std::memory_order_seq_cst,
+                             std::memory_order_seq_cst>(test_ref, base_pointer);
+
+  test_compare_exchange_strong(test_ref, base_pointer);
+  test_compare_exchange_strong<T, std::memory_order_seq_cst>(test_ref,
+                                                             base_pointer);
+  test_compare_exchange_strong<T, std::memory_order_seq_cst,
+                               std::memory_order_seq_cst>(test_ref,
+                                                          base_pointer);
+
+  test_ref.store(base_pointer);
+  assert(test_ref.fetch_add(std::ptrdiff_t(1)) == base_pointer);
+  assert(test_ref.fetch_add(std::ptrdiff_t(2), std::memory_order_relaxed) ==
+         base_pointer + 1);
+  assert(test_ref.fetch_sub(std::ptrdiff_t(2)) == base_pointer + 3);
+  assert(test_ref.fetch_sub(std::ptrdiff_t(1), std::memory_order_relaxed) ==
+         base_pointer + 1);
+  assert(test_ref.load() == base_pointer);
+
+  test_ref.store(base_pointer + 2);
+  assert(test_ref.load() == base_pointer + 2);
+  assert(underlying == base_pointer + 2);
+
+  const atomic_ref_t test_ref_2(test_ref);
+  assert(test_ref_2.load() == base_pointer + 2);
+  assert((test_ref_2 = base_pointer + 3) == base_pointer + 3);
+  assert(test_ref.load() == base_pointer + 3);
+  assert(underlying == base_pointer + 3);
+
+  assert(test_ref_2++ == base_pointer + 3);
+  assert(test_ref_2.load() == base_pointer + 4);
+  assert(++test_ref_2 == base_pointer + 5);
+  assert(test_ref_2.load() == base_pointer + 5);
+
+  assert(test_ref_2-- == base_pointer + 5);
+  assert(test_ref_2.load() == base_pointer + 4);
+  assert(--test_ref_2 == base_pointer + 3);
+  assert(test_ref_2.load() == base_pointer + 3);
+
+  assert((test_ref_2 += std::ptrdiff_t(2)) == base_pointer + 5);
+  assert((test_ref_2 -= std::ptrdiff_t(1)) == base_pointer + 4);
+
+  assert(test_ref_2.load() == base_pointer + 4);
+  assert(test_ref.load() == base_pointer + 4);
+  assert(underlying == base_pointer + 4);
 }

 int main(int, char**)
 {
     test<std::atomic<int*>, int*>();
+    test_atomic_ref<int*>();

-    return 0;
+  return 0;
 }
diff --git a/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp b/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp
new file mode 100644
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/assign_to_atomic_ref_deleted.fail.cpp
@@ -0,0 +1,48 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+//     static constexpr bool is_always_lock_free;
+//     static constexpr size_t required_alignment;
+//
+//     explicit atomic_ref(T& obj) noexcept;
+//     atomic_ref(const atomic_ref&) noexcept;
+//     T operator=(T desired) const noexcept;
+//     atomic_ref& operator=(const atomic_ref&) = delete;
+//
+//     bool is_lock_free() const noexcept;
+//     void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     T load(memory_order m = memory_order_seq_cst) const noexcept;
+//     operator T() const noexcept;
+//     T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr,
+//                                memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order m = memory_order_seq_cst) const noexcept;
+// };
+
+#include <atomic>
+#include <cassert>
+#include <type_traits>
+
+int main(int, char**) {
+  int val = 1;
+  std::atomic_ref<int> t0(val);
+  std::atomic_ref<int> t1(val);
+  t0 = t1; // expected-error {{object of type 'std::atomic_ref<int>' cannot be assigned because its copy assignment operator is implicitly deleted}}
+
+  return 0;
+}
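The test above pins down a deliberate design point: assignment between atomic_ref objects is deleted because `t0 = t1` could plausibly mean either "rebind the reference" or "atomically store t1's value". A sketch of the two spellings that remain legal (illustrative only):

    #include <atomic>

    void example(int& a, int& b) {
      std::atomic_ref<int> ra(a);
      std::atomic_ref<int> rb(b);
      // ra = rb;           // ill-formed: copy assignment is deleted (see test above)
      ra = rb.load();       // OK: operator=(T) atomically stores rb's current value
      ra.store(rb.load());  // equivalent and more explicit
    }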
diff --git a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
--- a/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/integral.pass.cpp
@@ -92,6 +92,7 @@
 #include <cassert>

 #include "test_macros.h"
+#include "test_compare_exchange_helpers.h"

 template <class A, class T>
 void
@@ -166,6 +167,154 @@
     do_test<volatile A, T>();
 }

+/*
+template <>
+struct atomic_ref<integral>
+{
+    static constexpr bool is_always_lock_free;
+    static constexpr size_t required_alignment;
+
+    bool is_lock_free() const noexcept;
+    void store(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    integral load(memory_order m = memory_order_seq_cst) const noexcept;
+    operator integral() const noexcept;
+    integral exchange(integral desr, memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr, memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order s, memory_order f) const noexcept;
+    bool compare_exchange_weak(integral& expc, integral desr,
+                               memory_order m = memory_order_seq_cst) const noexcept;
+    bool compare_exchange_strong(integral& expc, integral desr,
+                                 memory_order m = memory_order_seq_cst) const noexcept;
+
+    integral fetch_add(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_sub(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_and(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_or(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+    integral fetch_xor(integral op, memory_order m = memory_order_seq_cst) const noexcept;
+
+    explicit atomic_ref(integral& obj) noexcept;
+    atomic_ref(const atomic_ref&) noexcept;
+    integral operator=(integral desired) const noexcept;
+    atomic_ref& operator=(const atomic_ref&) = delete;
+
+    integral operator++(int) const noexcept;
+    integral operator--(int) const noexcept;
+    integral operator++() const noexcept;
+    integral operator--() const noexcept;
+    integral operator+=(integral op) const noexcept;
+    integral operator-=(integral op) const noexcept;
+    integral operator&=(integral op) const noexcept;
+    integral operator|=(integral op) const noexcept;
+    integral operator^=(integral op) const noexcept;
+};
+*/
+
+template <class T>
+void test_atomic_ref() {
+  constexpr T initial_value = 42;
+  T underlying(42);
+  using atomic_ref_t = std::atomic_ref<T>;
+  const atomic_ref_t test_ref(underlying);
+
+  const auto is_lock_free = test_ref.is_lock_free();
+  (void)is_lock_free;
+  constexpr auto is_always_lock_free = atomic_ref_t::is_always_lock_free;
+  (void)is_always_lock_free;
+  constexpr auto required_alignment = atomic_ref_t::required_alignment;
+  assert(required_alignment >= sizeof(T));
+  assert(required_alignment % sizeof(T) == 0);
+  assert(test_ref.load(std::memory_order_relaxed) == initial_value);
+
+  assert(test_ref.load() == initial_value);
+  test_ref.store(initial_value + T(1), std::memory_order_relaxed);
+  assert(test_ref.load(std::memory_order_relaxed) == initial_value + T(1));
+  test_ref.store(initial_value + T(2));
+  assert(test_ref.load() == initial_value + T(2));
+  assert(static_cast<T>(test_ref) == initial_value + T(2));
+
+  assert(test_ref.exchange(initial_value) == initial_value + T(2));
+  assert(test_ref.exchange(initial_value - T(1), std::memory_order_relaxed) ==
+         initial_value);
+  assert(test_ref.load() == initial_value - T(1));
+
+  test_compare_exchange_weak(test_ref, initial_value);
+  test_compare_exchange_weak<T, std::memory_order_seq_cst>(test_ref,
+                                                           initial_value);
+  test_compare_exchange_weak<T, std::memory_order_seq_cst,
+                             std::memory_order_seq_cst>(test_ref,
+                                                        initial_value);
+
+  test_compare_exchange_strong(test_ref, initial_value);
+  test_compare_exchange_strong<T, std::memory_order_seq_cst>(test_ref,
+                                                             initial_value);
+  test_compare_exchange_strong<T, std::memory_order_seq_cst,
+                               std::memory_order_seq_cst>(test_ref,
+                                                          initial_value);
+
+  test_ref.store(initial_value + T(7));
+  assert(test_ref.fetch_add(T(3)) == initial_value + T(7));
+  assert(test_ref.load() == initial_value + T(10));
+  assert(test_ref.fetch_add(T(2), std::memory_order_relaxed) ==
+         initial_value + T(10));
+  assert(test_ref.load() == initial_value + T(12));
+
+  assert(test_ref.fetch_sub(T(2)) == initial_value + T(12));
+  assert(test_ref.load() == initial_value + T(10));
+  assert(test_ref.fetch_sub(T(3), std::memory_order_relaxed) ==
+         initial_value + T(10));
+  assert(test_ref.load() == initial_value + T(7));
+
+  test_ref.store(T(7));
+  assert(test_ref.fetch_and(T(3)) == T(7));
+  assert(test_ref.load() == T(3));
+  assert(test_ref.fetch_and(T(1), std::memory_order_relaxed) == T(3));
+  assert(test_ref.load() == T(1));
+
+  test_ref.store(T(0));
+  assert(test_ref.fetch_or(T(1)) == T(0));
+  assert(test_ref.load() == T(1));
+  assert(test_ref.fetch_or(T(2), std::memory_order_relaxed) == T(1));
+  assert(test_ref.load() == T(3));
+
+  test_ref.store(T(128) | T(32) | T(8));
+  assert(test_ref.fetch_xor(T(64) | T(16) | T(8)) == (T(128) | T(32) | T(8)));
+  assert(test_ref.load() == (T(128) | T(64) | T(32) | T(16)));
+  assert(
+      test_ref.fetch_xor(T(128) | T(64) | T(16), std::memory_order_relaxed) ==
+      (T(128) | T(64) | T(32) | T(16)));
+  assert(test_ref.load() == T(32));
+
+  assert(underlying == T(32));
+  const atomic_ref_t test_ref_2(test_ref);
+  assert(test_ref_2.load() == T(32));
+  assert((test_ref_2 = T(64)) == T(64));
+  assert(test_ref.load() == T(64));
+  assert(test_ref_2.load() == T(64));
+
+  assert(test_ref_2++ == T(64));
+  assert(static_cast<T>(test_ref_2) == T(65));
+  assert(++test_ref_2 == T(66));
+  assert(test_ref_2.load() == T(66));
+  assert(test_ref_2-- == T(66));
+  assert(test_ref_2.load() == T(65));
+  assert(--test_ref_2 == T(64));
+  assert(test_ref_2.load() == T(64));
+
+  assert((test_ref_2 += T(16)) == T(80));
+  assert((test_ref_2 -= T(32)) == T(48));
+  assert(test_ref_2.load() == T(48));
+  assert((test_ref_2 = T(31)) == T(31));
+  assert((test_ref_2 &= T(15)) == T(15));
+  assert(test_ref_2.load() == T(15));
+  assert((test_ref_2 |= T(16)) == T(31));
+  assert(test_ref_2.load() == T(31));
+  constexpr T expected_final_value = T(1) | T(2) | T(4) | T(8) | T(64);
+  assert((test_ref_2 ^= (T(16) | T(64))) == expected_final_value);
+  assert(test_ref_2.load() == expected_final_value);
+  assert(test_ref.load() == expected_final_value);
+  assert(underlying == expected_final_value);
+}

 int main(int, char**)
 {
@@ -224,5 +373,31 @@
     test<int64_t>();
     test<uint64_t>();

-    return 0;
+    test_atomic_ref<char>();
+    test_atomic_ref<signed char>();
+    test_atomic_ref<unsigned char>();
+    test_atomic_ref<short>();
+    test_atomic_ref<unsigned short>();
+    test_atomic_ref<int>();
+    test_atomic_ref<unsigned int>();
+    test_atomic_ref<long>();
+    test_atomic_ref<unsigned long>();
+    test_atomic_ref<long long>();
+    test_atomic_ref<unsigned long long>();
+#ifndef _LIBCPP_HAS_NO_UNICODE_CHARS
+    test_atomic_ref<char16_t>();
+    test_atomic_ref<char32_t>();
+#endif // _LIBCPP_HAS_NO_UNICODE_CHARS
+    test_atomic_ref<wchar_t>();
+
+    test_atomic_ref<int8_t>();
+    test_atomic_ref<uint8_t>();
+    test_atomic_ref<int16_t>();
+    test_atomic_ref<uint16_t>();
+    test_atomic_ref<int32_t>();
+    test_atomic_ref<uint32_t>();
+    test_atomic_ref<int64_t>();
+    test_atomic_ref<uint64_t>();
+
+    return 0;
 }
diff --git a/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h b/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h
new file mode 100644
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/test_compare_exchange_helpers.h
@@ -0,0 +1,35 @@
+#pragma once
+#include <atomic>
+#include <cassert>
+
+template <class T, std::memory_order... memory_orders>
+void test_compare_exchange_weak(const std::atomic_ref<T> test_ref,
+                                const T initial_value) {
+  test_ref.store(initial_value);
+  const T desired_value = T(initial_value + 2);
+  T expected = T(initial_value + 3);
+  assert(!test_ref.compare_exchange_weak(expected, desired_value,
+                                         memory_orders...));
+  assert(expected == initial_value);
+  assert(test_ref.load() == initial_value);
+  while (!test_ref.compare_exchange_weak(expected, desired_value,
+                                         memory_orders...)) {
+    assert(expected == initial_value);
+    assert(test_ref.load() == initial_value);
+  }
+  assert(test_ref.load() == desired_value);
+}
+
+template <class T, std::memory_order... memory_orders>
+void test_compare_exchange_strong(const std::atomic_ref<T> test_ref,
+                                  const T initial_value) {
+  test_ref.store(initial_value);
+  const T desired_value = T(initial_value + 2);
+  T expected = T(initial_value + 3);
+  assert(!test_ref.compare_exchange_strong(expected, desired_value,
+                                           memory_orders...));
+  assert(expected == initial_value);
+  assert(test_ref.load() == initial_value);
+  assert(test_ref.compare_exchange_strong(expected, desired_value,
+                                          memory_orders...));
+  assert(test_ref.load() == desired_value);
+}
diff --git a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
--- a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
@@ -48,6 +48,31 @@
 //     T operator=(T) noexcept;
 // };

+// template <class T>
+// struct atomic_ref
+// {
+//     static constexpr bool is_always_lock_free;
+//     static constexpr size_t required_alignment;
+//
+//     explicit atomic_ref(T& obj) noexcept;
+//     atomic_ref(const atomic_ref&) noexcept;
+//     T operator=(T desired) const noexcept;
+//     atomic_ref& operator=(const atomic_ref&) = delete;
+//
+//     bool is_lock_free() const noexcept;
+//     void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     T load(memory_order m = memory_order_seq_cst) const noexcept;
+//     operator T() const noexcept;
+//     T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr,
+//                                memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order m = memory_order_seq_cst) const noexcept;
+// };
+
 #include <atomic>
 #include <new>
 #include <cassert>
@@ -64,6 +89,7 @@
 template <class T>
 void test ( T t ) {
     std::atomic<T> t0(t);
+    std::atomic_ref<T> ref_t0(t);
 }

 int main(int, char**)
diff --git a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp
new file mode 100644
--- /dev/null
+++ b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable_ref.fail.cpp
@@ -0,0 +1,58 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+// <atomic>
+
+// template <class T>
+// struct atomic_ref
+// {
+//     static constexpr bool is_always_lock_free;
+//     static constexpr size_t required_alignment;
+
+//     bool is_lock_free() const noexcept;
+//     void store(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     T load(memory_order m = memory_order_seq_cst) const noexcept;
+//     operator T() const noexcept;
+//     T exchange(T desr, memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr, memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order s, memory_order f) const noexcept;
+//     bool compare_exchange_weak(T& expc, T desr,
+//                                memory_order m = memory_order_seq_cst) const noexcept;
+//     bool compare_exchange_strong(T& expc, T desr,
+//                                  memory_order m = memory_order_seq_cst) const noexcept;

+//     explicit atomic_ref(T& obj) noexcept;
+//     atomic_ref(const atomic_ref&) noexcept;
+//     T operator=(T desired) const noexcept;
+//     atomic_ref& operator=(const atomic_ref&) = delete;
+// };
+
+#include <atomic>
+#include <new>
+#include <cassert>
+#include <thread>  // for thread_id
+#include <chrono>  // for nanoseconds
+
+struct NotTriviallyCopyable {
+  NotTriviallyCopyable(int i) : i_(i) {}
+  NotTriviallyCopyable(const NotTriviallyCopyable& rhs) : i_(rhs.i_) {}
+  int i_;
+};
+
+template <class T>
+void test(T t) {
+  std::atomic_ref<T> t0(t); // expected-error@atomic:* {{static_assert failed due to requirement 'is_trivially_copyable<NotTriviallyCopyable>::value' "std::atomic_ref<_Tp> requires that '_Tp' be a trivially copyable type"}}
+}
+
+int main(int, char**) {
+  test(NotTriviallyCopyable(42));
+
+  return 0;
+}
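Finally, an end-to-end sketch of the use case motivating this patch: racing updates to plain data through atomic_ref (C++20; illustrative only, not part of the test suite):

    #include <atomic>
    #include <cassert>
    #include <thread>

    int main() {
      int counter = 0;  // plain int, e.g. an element inside an existing structure
      auto bump = [&counter] {
        std::atomic_ref<int> ref(counter);
        for (int i = 0; i < 1000; ++i)
          ref.fetch_add(1, std::memory_order_relaxed);
      };
      std::thread t1(bump), t2(bump);
      t1.join();
      t2.join();
      // No data race: every concurrent access went through an atomic_ref.
      assert(counter == 2000);
      return 0;
    }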