diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -618,6 +618,9 @@
   __ranges/zip_view.h
   __split_buffer
   __std_mbstate_t.h
+  __stop_token/atomic_unique_lock.h
+  __stop_token/intrusive_list.h
+  __stop_token/intrusive_shared_ptr.h
   __string/char_traits.h
   __string/constexpr_c_functions.h
   __string/extern_template_lists.h
diff --git a/libcxx/include/__stop_token/atomic_unique_lock.h b/libcxx/include/__stop_token/atomic_unique_lock.h
new file
--- /dev/null
+++ b/libcxx/include/__stop_token/atomic_unique_lock.h
@@ -0,0 +1,135 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
+#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
+
+#include <__config>
+#include <atomic>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+/**
+ * This class implements an RAII unique_lock without a mutex.
+ * It uses std::atomic<_State>, where _State contains a lock bit
+ * and might contain other data.
+ */
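+//
+// A usage sketch (hypothetical caller; the state type and lock bit below are
+// assumptions made for illustration only):
+//
+//   std::atomic<uint32_t> __state{0};
+//   constexpr uint32_t __lock_bit = 1 << 0;
+//
+//   {
+//     // blocks (via atomic wait) until the lock bit has been acquired
+//     __atomic_unique_lock<uint32_t, __lock_bit> __lock(__state);
+//     // ... read or write the other bits of __state while holding the lock ...
+//   } // the destructor clears the lock bit and calls notify_all()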
+template <class _State, _State _LockedBit>
+class __atomic_unique_lock {
+  std::atomic<_State>& __state_;
+  bool __is_locked_;
+
+public:
+  _LIBCPP_HIDE_FROM_ABI explicit __atomic_unique_lock(std::atomic<_State>& __state) noexcept
+      : __state_(__state), __is_locked_(true) {
+    __lock();
+  }
+
+  template <class _Pred>
+  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(std::atomic<_State>& __state, _Pred&& __give_up_locking) noexcept
+      : __state_(__state), __is_locked_(false) {
+    __is_locked_ = __lock_impl(__give_up_locking, __set_locked_bit, std::memory_order_acquire);
+  }
+
+  template <class _Pred, class _UnaryFunction>
+  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(
+      std::atomic<_State>& __state,
+      _Pred&& __give_up_locking,
+      _UnaryFunction&& __state_after_lock,
+      std::memory_order __locked_ordering) noexcept
+      : __state_(__state), __is_locked_(false) {
+    __is_locked_ = __lock_impl(__give_up_locking, __state_after_lock, __locked_ordering);
+  }
+
+  __atomic_unique_lock(const __atomic_unique_lock&) = delete;
+  __atomic_unique_lock(__atomic_unique_lock&&) = delete;
+  __atomic_unique_lock& operator=(const __atomic_unique_lock&) = delete;
+  __atomic_unique_lock& operator=(__atomic_unique_lock&&) = delete;
+
+  _LIBCPP_HIDE_FROM_ABI ~__atomic_unique_lock() {
+    if (__is_locked_) {
+      __unlock();
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI bool __owns_lock() const noexcept { return __is_locked_; }
+
+  _LIBCPP_HIDE_FROM_ABI void __lock() noexcept {
+    const auto __never_give_up_locking = [](_State) { return false; };
+    // std::memory_order_acquire because we'd like to make sure that all the read operations after the lock
+    // read up-to-date values.
+    __lock_impl(__never_give_up_locking, __set_locked_bit, std::memory_order_acquire);
+    __is_locked_ = true;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI void __unlock() noexcept {
+    // Unset the _LockedBit. `memory_order_release` because we need to make sure all the write operations before
+    // calling `__unlock` are made visible to other threads.
+    __state_.fetch_and(~_LockedBit, std::memory_order_release);
+    __state_.notify_all();
+    __is_locked_ = false;
+  }
+
+private:
+  template <class _Pred, class _UnaryFunction>
+  _LIBCPP_HIDE_FROM_ABI bool __lock_impl(
+      _Pred&& __give_up_locking, // while trying to lock the state, if the predicate returns true, give up
+                                 // locking and return
+      _UnaryFunction&& __state_after_lock,
+      std::memory_order __locked_ordering) noexcept {
+    // At this stage, until we exit the inner while loop, other than the atomic state, we are not reading any
+    // order-dependent values that are written on other threads, or writing anything that needs to be seen on
+    // other threads. Therefore `memory_order_relaxed` is enough.
+    _State __current_state = __state_.load(std::memory_order_relaxed);
+    do {
+      while (true) {
+        if (__give_up_locking(__current_state)) {
+          // user-provided early return condition. fail to lock
+          return false;
+        } else if ((__current_state & _LockedBit) != 0) {
+          // another thread has locked the state, we need to wait
+          __state_.wait(__current_state, std::memory_order_relaxed);
+          // when we are woken up by notify_all or spuriously, the __state_
+          // might have changed. reload the state
+          // Note that the new state's _LockedBit may or may not be 0
+          __current_state = __state_.load(std::memory_order_relaxed);
+        } else {
+          // at least for now, it is not locked. we can try `compare_exchange_weak` to lock it.
+          // Note that the variable `__current_state`'s lock bit has to be 0 at this point.
+          break;
+        }
+      }
+    } while (!__state_.compare_exchange_weak(
+        __current_state,                     // if __state_ has the same value as __current_state, the lock bit
+                                             // must be zero before the exchange and we are good to lock/exchange
+                                             // and return. If __state_ has a different value, because other
+                                             // threads locked it between the `break` statement above and this
+                                             // statement, the exchange fails and we go back to the inner while
+                                             // loop above.
+        __state_after_lock(__current_state), // state after lock. Usually it should be __current_state | _LockedBit.
+                                             // Some use cases need to set other bits at the same time as an atomic
+                                             // operation, therefore we accept a function
+        __locked_ordering,                   // successful exchange order. Usually it should be
+                                             // std::memory_order_acquire. Some use cases need stricter ordering,
+                                             // therefore we accept it as a parameter
+        std::memory_order_relaxed            // failed exchange order. We don't need any ordering as we are going
+                                             // back to the inner while loop
+        ));
+    return true;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static constexpr auto __set_locked_bit = [](_State __state) { return __state | _LockedBit; };
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
diff --git a/libcxx/include/__stop_token/intrusive_list.h b/libcxx/include/__stop_token/intrusive_list.h
new file
--- /dev/null
+++ b/libcxx/include/__stop_token/intrusive_list.h
@@ -0,0 +1,74 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
+#define _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
+
+#include <__assert>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Derived>
+struct __intrusive_node_base {
+  _Derived* __next_ = nullptr;
+  _Derived* __prev_ = nullptr;
+};
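+
+// __intrusive_list is a minimal, non-owning, doubly-linked list: nodes embed
+// their own __next_/__prev_ pointers, so linking and unlinking never allocate.
+// A usage sketch (the node type below is a hypothetical example, for
+// illustration only):
+//
+//   struct __example_node : __intrusive_node_base<__example_node> {
+//     int __value_;
+//   };
+//
+//   __intrusive_list<__example_node> __list;
+//   __example_node __n1, __n2;
+//   __list.__push_front(&__n1);
+//   __list.__push_front(&__n2);                      // list: __n2 -> __n1
+//   __list.__remove(&__n1);                          // O(1) unlink, no allocation
+//   __example_node* __front = __list.__pop_front();  // returns &__n2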
+
+template <class _Node>
+struct __intrusive_list {
+  _LIBCPP_HIDE_FROM_ABI bool __empty() const noexcept { return __head_ == nullptr; }
+
+  _LIBCPP_HIDE_FROM_ABI void __push_front(_Node* __node) noexcept {
+    __node->__next_ = __head_;
+    if (__head_) {
+      __head_->__prev_ = __node;
+    }
+    __head_ = __node;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Node* __pop_front() noexcept {
+    _Node* __front = __head_;
+    __head_ = __head_->__next_;
+    if (__head_) {
+      __head_->__prev_ = nullptr;
+    }
+    // OK not to set __front->__next_ = nullptr as __front is not part of the list anymore
+    return __front;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI void __remove(_Node* __node) noexcept {
+    if (__node->__prev_) {
+      // prev exists, set its next to our next to skip __node
+      __node->__prev_->__next_ = __node->__next_;
+      if (__node->__next_) {
+        __node->__next_->__prev_ = __node->__prev_;
+      }
+    } else {
+      _LIBCPP_ASSERT(__node == __head_, "Node to be removed has no prev node, so it has to be the head");
+      __pop_front();
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI bool __is_head(_Node* __node) noexcept { return __node == __head_; }
+
+private:
+  _Node* __head_ = nullptr;
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
diff --git a/libcxx/include/__stop_token/intrusive_shared_ptr.h b/libcxx/include/__stop_token/intrusive_shared_ptr.h
new file
--- /dev/null
+++ b/libcxx/include/__stop_token/intrusive_shared_ptr.h
@@ -0,0 +1,127 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H
+#define _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H
+
+#include <__atomic/memory_order.h>
+#include <__config>
+#include <__type_traits/is_reference.h>
+#include <__utility/move.h>
+#include <__utility/swap.h>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+// Specialize __intrusive_shared_ptr_traits<_Tp> and implement
+//
+//     static std::atomic<U>& __get_atomic_ref_count(_Tp&);
+//
+// where U must be an integral type
+template <class _Tp>
+struct __intrusive_shared_ptr_traits;
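+
+// An example specialization (the type below is hypothetical, for illustration
+// only):
+//
+//   struct __example {
+//     std::atomic<uint32_t> __ref_count_{0};
+//   };
+//
+//   template <>
+//   struct __intrusive_shared_ptr_traits<__example> {
+//     static std::atomic<uint32_t>& __get_atomic_ref_count(__example& __obj) {
+//       return __obj.__ref_count_;
+//     }
+//   };
+//
+//   __intrusive_shared_ptr<__example> __ptr(new __example); // ref count becomes 1
+//   // copies bump the count; destroying the last copy calls `delete` on the object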
+
+// A reference-counting shared pointer for types whose reference counter
+// is stored inside the class _Tp itself.
+// When the reference count goes to zero, the object is destroyed via `delete`.
+template <class _Tp>
+struct __intrusive_shared_ptr {
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr() = default;
+
+  _LIBCPP_HIDE_FROM_ABI explicit __intrusive_shared_ptr(_Tp* __raw_ptr) : __raw_ptr_(__raw_ptr) {
+    if (__raw_ptr_)
+      __increment_ref_count(*__raw_ptr_);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr(const __intrusive_shared_ptr& __other) noexcept
+      : __raw_ptr_(__other.__raw_ptr_) {
+    if (__raw_ptr_)
+      __increment_ref_count(*__raw_ptr_);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr(__intrusive_shared_ptr&& __other) noexcept
+      : __raw_ptr_(__other.__raw_ptr_) {
+    __other.__raw_ptr_ = nullptr;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr& operator=(const __intrusive_shared_ptr& __other) noexcept {
+    if (__other.__raw_ptr_ != __raw_ptr_) {
+      if (__other.__raw_ptr_) {
+        __increment_ref_count(*__other.__raw_ptr_);
+      }
+      if (__raw_ptr_) {
+        __decrement_ref_count(*__raw_ptr_);
+      }
+      __raw_ptr_ = __other.__raw_ptr_;
+    }
+    return *this;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr& operator=(__intrusive_shared_ptr&& __other) noexcept {
+    __intrusive_shared_ptr(std::move(__other)).swap(*this);
+    return *this;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI ~__intrusive_shared_ptr() {
+    if (__raw_ptr_) {
+      __decrement_ref_count(*__raw_ptr_);
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator->() const noexcept { return __raw_ptr_; }
+  _LIBCPP_HIDE_FROM_ABI _Tp& operator*() const noexcept { return *__raw_ptr_; }
+  _LIBCPP_HIDE_FROM_ABI explicit operator bool() const noexcept { return __raw_ptr_ != nullptr; }
+
+  _LIBCPP_HIDE_FROM_ABI void swap(__intrusive_shared_ptr& __other) { std::swap(__raw_ptr_, __other.__raw_ptr_); }
+
+  _LIBCPP_HIDE_FROM_ABI friend void swap(__intrusive_shared_ptr& __lhs, __intrusive_shared_ptr& __rhs) {
+    __lhs.swap(__rhs);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI friend constexpr bool
+  operator==(const __intrusive_shared_ptr&, const __intrusive_shared_ptr&) = default;
+
+  _LIBCPP_HIDE_FROM_ABI friend constexpr bool operator==(const __intrusive_shared_ptr& __ptr, std::nullptr_t) {
+    return __ptr.__raw_ptr_ == nullptr;
+  }
+
+private:
+  _Tp* __raw_ptr_ = nullptr;
+
+  // The memory orders for incrementing/decrementing the counter are the same as for std::shared_ptr:
+  // increment is relaxed and decrement is acq_rel
+  _LIBCPP_HIDE_FROM_ABI static void __increment_ref_count(_Tp& __obj) {
+    __get_atomic_ref_count(__obj).fetch_add(1, std::memory_order_relaxed);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static void __decrement_ref_count(_Tp& __obj) {
+    if (__get_atomic_ref_count(__obj).fetch_sub(1, std::memory_order_acq_rel) == 1) {
+      delete &__obj;
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static decltype(auto) __get_atomic_ref_count(_Tp& __obj) {
+    using __ret_type = decltype(__intrusive_shared_ptr_traits<_Tp>::__get_atomic_ref_count(__obj));
+    static_assert(
+        std::is_reference_v<__ret_type>, "__get_atomic_ref_count should return a reference to the atomic counter");
+    return __intrusive_shared_ptr_traits<_Tp>::__get_atomic_ref_count(__obj);
+  }
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H