diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -618,6 +618,9 @@
   __ranges/zip_view.h
   __split_buffer
   __std_mbstate_t.h
+  __stop_token/atomic_unique_lock.h
+  __stop_token/intrusive_list.h
+  __stop_token/intrusive_shared_ptr.h
   __string/char_traits.h
   __string/constexpr_c_functions.h
   __string/extern_template_lists.h
diff --git a/libcxx/include/__stop_token/atomic_unique_lock.h b/libcxx/include/__stop_token/atomic_unique_lock.h
new file mode 100644
--- /dev/null
+++ b/libcxx/include/__stop_token/atomic_unique_lock.h
@@ -0,0 +1,136 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
+#define _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
+
+#include <__config>
+#include <atomic>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+/**
+ * This class implements an RAII unique_lock without a mutex.
+ * It uses std::atomic<State> where State contains a lock bit
+ * and might contain other data.
+ */
+template <class _State, _State _LockedBit>
+class __atomic_unique_lock {
+  std::atomic<_State>& __state_;
+  bool __is_locked_;
+
+public:
+  _LIBCPP_HIDE_FROM_ABI explicit __atomic_unique_lock(std::atomic<_State>& __state) noexcept
+      : __state_(__state), __is_locked_(true) {
+    __lock();
+  }
+
+  template <class _Pred>
+  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(std::atomic<_State>& __state, _Pred&& __give_up_locking) noexcept
+      : __state_(__state), __is_locked_(false) {
+    __is_locked_ = __lock_impl(__give_up_locking, __set_locked_bit, std::memory_order_acquire);
+  }
+
+  template <class _Pred, class _UnaryFunction>
+  _LIBCPP_HIDE_FROM_ABI __atomic_unique_lock(
+      std::atomic<_State>& __state,
+      _Pred&& __give_up_locking,
+      _UnaryFunction&& __state_after_lock,
+      std::memory_order __locked_ordering) noexcept
+      : __state_(__state), __is_locked_(false) {
+    __is_locked_ = __lock_impl(__give_up_locking, __state_after_lock, __locked_ordering);
+  }
+
+  __atomic_unique_lock(const __atomic_unique_lock&) = delete;
+  __atomic_unique_lock(__atomic_unique_lock&&) = delete;
+  __atomic_unique_lock& operator=(const __atomic_unique_lock&) = delete;
+  __atomic_unique_lock& operator=(__atomic_unique_lock&&) = delete;
+
+  _LIBCPP_HIDE_FROM_ABI ~__atomic_unique_lock() {
+    if (__is_locked_) {
+      __unlock();
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI bool __owns_lock() const noexcept { return __is_locked_; }
+
+  _LIBCPP_HIDE_FROM_ABI void __lock() noexcept {
+    const auto __never_give_up_locking = [](_State) { return false; };
+    // std::memory_order_acquire because we'd like to make sure that all the read operations after the lock can read
+    // the up-to-date values.
+    __lock_impl(__never_give_up_locking, __set_locked_bit, std::memory_order_acquire);
+    __is_locked_ = true;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI void __unlock() noexcept {
+    // unset the _LockedBit. `memory_order_release` because we need to make sure all the write operations before
+    // calling `__unlock` will be made visible to other threads
+    __state_.fetch_and(~_LockedBit, std::memory_order_release);
+    __state_.notify_all();
+    __is_locked_ = false;
+  }
+
+private:
+  template <class _Pred, class _UnaryFunction>
+  _LIBCPP_HIDE_FROM_ABI bool
+  __lock_impl(_Pred&& __give_up_locking, // while trying to lock the state, if the predicate returns true, give up
+                                         // locking and return
+              _UnaryFunction&& __state_after_lock,
+              std::memory_order __locked_ordering) noexcept {
+    // At this stage, until we exit the inner while loop, other than the atomic state, we are not reading any
+    // order-dependent values that are written by other threads, nor writing anything that needs to be seen by other
+    // threads. Therefore `memory_order_relaxed` is enough.
+    _State __current_state = __state_.load(std::memory_order_relaxed);
+    do {
+      while (true) {
+        if (__give_up_locking(__current_state)) {
+          // user-provided early return condition. fail to lock
+          return false;
+        } else if ((__current_state & _LockedBit) != 0) {
+          // another thread has locked the state, we need to wait
+          __state_.wait(__current_state, std::memory_order_relaxed);
+          // when we are woken up by notify_all or spuriously, the __state_ might have changed, so reload the state.
+          // Note that the new state's _LockedBit may or may not be 0.
+          __current_state = __state_.load(std::memory_order_relaxed);
+        } else {
+          // at least for now, it is not locked. we can try `compare_exchange_weak` to lock it.
+          // Note that the variable `__current_state`'s lock bit has to be 0 at this point.
+          break;
+        }
+      }
+    } while (!__state_.compare_exchange_weak(
+        __current_state,                     // If __state_ still has the same value as __current_state, the lock bit
+                                             // must be zero before the exchange and we are good to lock/exchange and
+                                             // return. If __state_ has a different value, because other threads
+                                             // locked it between the `break` statement above and this statement, the
+                                             // exchange fails and we go back to the inner while loop above.
+        __state_after_lock(__current_state), // state after lock. Usually it should be __current_state | _LockedBit.
+                                             // Some use cases need to set other bits at the same time as an atomic
+                                             // operation, therefore we accept a function.
+        __locked_ordering,                   // successful exchange order. Usually it should be
+                                             // std::memory_order_acquire. Some use cases need stricter ordering,
+                                             // therefore we accept it as a parameter.
+        std::memory_order_relaxed            // failed exchange order. We don't need any ordering here as we are going
+                                             // back to the inner while loop.
+        ));
+    return true;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static constexpr auto __set_locked_bit = [](_State __state) { return __state | _LockedBit; };
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_ATOMIC_UNIQUE_GUARD_H
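Not part of the patch: a minimal usage sketch to make the constructor overloads above concrete. It assumes a 32-bit state word whose bit 0 is the lock bit and whose bit 1 is some unrelated flag; the names __lock_bit, __flag_bit and __try_set_flag are invented for this illustration, and since the header is libc++-internal, real callers would live inside the library itself.

#include <__stop_token/atomic_unique_lock.h> // private libc++ header
#include <atomic>
#include <cstdint>

inline constexpr uint32_t __lock_bit = 1u << 0; // hypothetical bit layout
inline constexpr uint32_t __flag_bit = 1u << 1;

// Sets __flag_bit while holding the lock bit; returns false if the flag was already set.
bool __try_set_flag(std::atomic<uint32_t>& __state) {
  std::__atomic_unique_lock<uint32_t, __lock_bit> __lock(
      __state,
      [](uint32_t __s) { return (__s & __flag_bit) != 0; },       // give up locking: the flag is already set
      [](uint32_t __s) { return __s | __lock_bit | __flag_bit; }, // state after lock: lock bit plus the flag
      std::memory_order_acq_rel);                                 // ordering of the successful exchange
  if (!__lock.__owns_lock())
    return false; // another thread won the race
  // ... work done here is serialized against every other holder of the lock bit ...
  return true; // the destructor clears only the lock bit and notifies waiters
}

The acq_rel ordering in the sketch is the kind of "stricter ordering" the last constructor parameter exists for: the same compare-exchange both takes the lock and publishes the flag.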
diff --git a/libcxx/include/__stop_token/intrusive_list.h b/libcxx/include/__stop_token/intrusive_list.h
new file mode 100644
--- /dev/null
+++ b/libcxx/include/__stop_token/intrusive_list.h
@@ -0,0 +1,75 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
+#define _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
+
+#include <__assert>
+#include <__config>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+template <class _Derived>
+struct __intrusive_node_base {
+  _Derived* __next_ = nullptr;
+  _Derived* __prev_ = nullptr;
+};
+
+template <class _Node>
+struct __intrusive_list {
+  _LIBCPP_HIDE_FROM_ABI bool __empty() const noexcept { return __head_ == nullptr; }
+
+  _LIBCPP_HIDE_FROM_ABI void __push_front(_Node* __node) noexcept {
+    __node->__next_ = __head_;
+    if (__head_) {
+      __head_->__prev_ = __node;
+    }
+    __head_ = __node;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Node* __pop_front() noexcept {
+    _Node* __front = __head_;
+    __head_ = __head_->__next_;
+    if (__head_) {
+      __head_->__prev_ = nullptr;
+    }
+    // OK not to set __front->__next_ = nullptr, as __front is not part of the list anymore
+    return __front;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI void __remove(_Node* __node) noexcept {
+    if (__node->__prev_) {
+      // prev exists, set its next to our next to skip __node
+      __node->__prev_->__next_ = __node->__next_;
+      if (__node->__next_) {
+        __node->__next_->__prev_ = __node->__prev_;
+      }
+    } else {
+      _LIBCPP_ASSERT(__node == __head_, "Node to be removed has no prev node, so it has to be the head");
+      __pop_front();
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI bool __is_head(_Node* __node) noexcept { return __node == __head_; }
+
+private:
+  _Node* __head_ = nullptr;
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_INTRUSIVE_LIST_H
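A short sketch (not in the patch) of the intended intrusive usage: the element type derives from __intrusive_node_base<Element> so that __next_/__prev_ are pointers to the element's own type, and the list only links nodes whose storage the caller manages, so it never allocates or frees anything. __callback_node, __fn_, __arg_ and __run_all are made-up names for this illustration.

#include <__stop_token/intrusive_list.h>

struct __callback_node : std::__intrusive_node_base<__callback_node> {
  void (*__fn_)(void*) = nullptr; // hypothetical payload
  void* __arg_         = nullptr;
};

void __run_all(std::__intrusive_list<__callback_node>& __list) {
  // Pop and invoke each node; the nodes themselves are owned elsewhere
  // (for example, inside callback registration objects), so nothing is deleted here.
  while (!__list.__empty()) {
    __callback_node* __node = __list.__pop_front();
    __node->__fn_(__node->__arg_);
  }
}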
diff --git a/libcxx/include/__stop_token/intrusive_shared_ptr.h b/libcxx/include/__stop_token/intrusive_shared_ptr.h
new file mode 100644
--- /dev/null
+++ b/libcxx/include/__stop_token/intrusive_shared_ptr.h
@@ -0,0 +1,127 @@
+// -*- C++ -*-
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H
+#define _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H
+
+#include <__atomic/memory_order.h>
+#include <__config>
+#include <__type_traits/is_reference.h>
+#include <__utility/move.h>
+#include <__utility/swap.h>
+#include <cstddef>
+
+#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
+#  pragma GCC system_header
+#endif
+
+_LIBCPP_BEGIN_NAMESPACE_STD
+
+#if _LIBCPP_STD_VER >= 20
+
+// Specialize __intrusive_shared_ptr_traits<_Tp> and implement
+//
+//     static std::atomic<U>& __get_atomic_ref_count(_Tp&);
+//
+// where U must be an integral type.
+template <class _Tp>
+struct __intrusive_shared_ptr_traits;
+
+// A reference-counting shared_ptr for types whose reference counter
+// is stored inside the class _Tp itself.
+// When the reference count goes to zero, the destructor of _Tp will be called.
+template <class _Tp>
+struct __intrusive_shared_ptr {
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr() = default;
+
+  _LIBCPP_HIDE_FROM_ABI explicit __intrusive_shared_ptr(_Tp* __raw_ptr) : __raw_ptr_(__raw_ptr) {
+    if (__raw_ptr_)
+      __increment_ref_count(*__raw_ptr_);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr(const __intrusive_shared_ptr& __other) noexcept
+      : __raw_ptr_(__other.__raw_ptr_) {
+    if (__raw_ptr_)
+      __increment_ref_count(*__raw_ptr_);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr(__intrusive_shared_ptr&& __other) noexcept
+      : __raw_ptr_(__other.__raw_ptr_) {
+    __other.__raw_ptr_ = nullptr;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr& operator=(const __intrusive_shared_ptr& __other) noexcept {
+    if (__other.__raw_ptr_ != __raw_ptr_) {
+      if (__other.__raw_ptr_) {
+        __increment_ref_count(*__other.__raw_ptr_);
+      }
+      if (__raw_ptr_) {
+        __decrement_ref_count(*__raw_ptr_);
+      }
+      __raw_ptr_ = __other.__raw_ptr_;
+    }
+    return *this;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI __intrusive_shared_ptr& operator=(__intrusive_shared_ptr&& __other) noexcept {
+    __intrusive_shared_ptr(std::move(__other)).swap(*this);
+    return *this;
+  }
+
+  _LIBCPP_HIDE_FROM_ABI ~__intrusive_shared_ptr() {
+    if (__raw_ptr_) {
+      __decrement_ref_count(*__raw_ptr_);
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI _Tp* operator->() const noexcept { return __raw_ptr_; }
+  _LIBCPP_HIDE_FROM_ABI _Tp& operator*() const noexcept { return *__raw_ptr_; }
+  _LIBCPP_HIDE_FROM_ABI explicit operator bool() const noexcept { return __raw_ptr_ != nullptr; }
+
+  _LIBCPP_HIDE_FROM_ABI void swap(__intrusive_shared_ptr& __other) { std::swap(__raw_ptr_, __other.__raw_ptr_); }
+
+  _LIBCPP_HIDE_FROM_ABI friend void swap(__intrusive_shared_ptr& __lhs, __intrusive_shared_ptr& __rhs) {
+    __lhs.swap(__rhs);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI friend bool constexpr
+  operator==(const __intrusive_shared_ptr&, const __intrusive_shared_ptr&) = default;
+
+  _LIBCPP_HIDE_FROM_ABI friend bool constexpr operator==(const __intrusive_shared_ptr& __ptr, std::nullptr_t) {
+    return __ptr.__raw_ptr_ == nullptr;
+  }
+
+private:
+  _Tp* __raw_ptr_ = nullptr;
+
+  // The memory orders for incrementing/decrementing the counter are the same as for std::shared_ptr:
+  // increment is relaxed and decrement is acq_rel.
+  _LIBCPP_HIDE_FROM_ABI static void __increment_ref_count(_Tp& __obj) {
+    __get_atomic_ref_count(__obj).fetch_add(1, std::memory_order_relaxed);
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static void __decrement_ref_count(_Tp& __obj) {
+    if (__get_atomic_ref_count(__obj).fetch_sub(1, std::memory_order_acq_rel) == 1) {
+      delete &__obj;
+    }
+  }
+
+  _LIBCPP_HIDE_FROM_ABI static decltype(auto) __get_atomic_ref_count(_Tp& __obj) {
+    using __ret_type = decltype(__intrusive_shared_ptr_traits<_Tp>::__get_atomic_ref_count(__obj));
+    static_assert(
+        std::is_reference_v<__ret_type>, "__get_atomic_ref_count should return a reference to the atomic counter");
+    return __intrusive_shared_ptr_traits<_Tp>::__get_atomic_ref_count(__obj);
+  }
+};
+
+#endif // _LIBCPP_STD_VER >= 20
+
+_LIBCPP_END_NAMESPACE_STD
+
+#endif // _LIBCPP___STOP_TOKEN_INTRUSIVE_SHARED_PTR_H
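For illustration only (not in the patch): a client type opts in by specializing __intrusive_shared_ptr_traits for itself and exposing its embedded counter. __stop_state, __ref_count_ and __make_state below are placeholder names.

#include <__stop_token/intrusive_shared_ptr.h>
#include <atomic>
#include <cstdint>

struct __stop_state { // hypothetical reference-counted type
  std::atomic<uint32_t> __ref_count_ = 0;
};

template <>
struct std::__intrusive_shared_ptr_traits<__stop_state> {
  static std::atomic<uint32_t>& __get_atomic_ref_count(__stop_state& __state) { return __state.__ref_count_; }
};

std::__intrusive_shared_ptr<__stop_state> __make_state() {
  // The raw-pointer constructor bumps the count to 1; the last copy to be
  // destroyed performs `delete` on the object.
  return std::__intrusive_shared_ptr<__stop_state>(new __stop_state());
}

As the comment in the header notes, the orderings mirror common shared_ptr practice: the relaxed increment is enough because a new reference is only ever created from a live one, while the acq_rel decrement ensures the owner that drops the count to zero sees every write made through the other copies before it runs delete.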
diff --git a/libcxx/include/libcxx.imp b/libcxx/include/libcxx.imp
--- a/libcxx/include/libcxx.imp
+++ b/libcxx/include/libcxx.imp
@@ -40,6 +40,7 @@
   { include: [ "@<__pstl/.*>", "private", "", "public" ] },
   { include: [ "@<__random/.*>", "private", "", "public" ] },
   { include: [ "@<__ranges/.*>", "private", "", "public" ] },
+  { include: [ "@<__stop_token/.*>", "private", "", "public" ] },
   { include: [ "@<__string/.*>", "private", "", "public" ] },
   { include: [ "@<__support/.*>", "private", "", "public" ] },
   { include: [ "@<__system_error/.*>", "private", "", "public" ] },
diff --git a/libcxx/include/module.modulemap.in b/libcxx/include/module.modulemap.in
--- a/libcxx/include/module.modulemap.in
+++ b/libcxx/include/module.modulemap.in
@@ -1423,6 +1423,14 @@
     header "stdexcept"
     export *
   }
+  module stop_token {
+    export *
+    module __stop_token {
+      module atomic_unique_lock { private header "__stop_token/atomic_unique_lock.h" }
+      module intrusive_list { private header "__stop_token/intrusive_list.h" }
+      module intrusive_shared_ptr { private header "__stop_token/intrusive_shared_ptr.h" }
+    }
+  }
   module streambuf {
     @requires_LIBCXX_ENABLE_LOCALIZATION@
     header "streambuf"
diff --git a/libcxx/test/libcxx/private_headers.verify.cpp b/libcxx/test/libcxx/private_headers.verify.cpp
--- a/libcxx/test/libcxx/private_headers.verify.cpp
+++ b/libcxx/test/libcxx/private_headers.verify.cpp
@@ -617,6 +617,9 @@
 #include <__ranges/zip_view.h> // expected-error@*:* {{use of private header from outside its module: '__ranges/zip_view.h'}}
 #include <__split_buffer> // expected-error@*:* {{use of private header from outside its module: '__split_buffer'}}
 #include <__std_mbstate_t.h> // expected-error@*:* {{use of private header from outside its module: '__std_mbstate_t.h'}}
+#include <__stop_token/atomic_unique_lock.h> // expected-error@*:* {{use of private header from outside its module: '__stop_token/atomic_unique_lock.h'}}
+#include <__stop_token/intrusive_list.h> // expected-error@*:* {{use of private header from outside its module: '__stop_token/intrusive_list.h'}}
+#include <__stop_token/intrusive_shared_ptr.h> // expected-error@*:* {{use of private header from outside its module: '__stop_token/intrusive_shared_ptr.h'}}
 #include <__string/char_traits.h> // expected-error@*:* {{use of private header from outside its module: '__string/char_traits.h'}}
 #include <__string/constexpr_c_functions.h> // expected-error@*:* {{use of private header from outside its module: '__string/constexpr_c_functions.h'}}
 #include <__string/extern_template_lists.h> // expected-error@*:* {{use of private header from outside its module: '__string/extern_template_lists.h'}}
diff --git a/libcxx/test/libcxx/thread/thread.stoptoken/atomic_unique_lock.pass.cpp b/libcxx/test/libcxx/thread/thread.stoptoken/atomic_unique_lock.pass.cpp
new file mode 100644
--- /dev/null
+++ b/libcxx/test/libcxx/thread/thread.stoptoken/atomic_unique_lock.pass.cpp
@@ -0,0 +1,85 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// UNSUPPORTED: no-threads
+
+// XFAIL: availability-synchronization_library-missing
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS: -Wno-private-header
+
+#include <__stop_token/atomic_unique_lock.h>
+#include <atomic>
+#include <cassert>
+#include <chrono>
+#include <cstdint>
+
+#include "make_test_thread.h"
+#include "test_macros.h"
+
+using Lock = std::__atomic_unique_lock<uint8_t, 1>;
+
+int main(int, char**) {
+  // lock on constructor
+  {
+    std::atomic<uint8_t> state{0};
+    Lock l(state);
+    assert(l.__owns_lock());
+  }
+
+  // always give up locking
+  {
+    std::atomic<uint8_t> state{0};
+    Lock l(state, [](auto const&) { return true; });
+    assert(!l.__owns_lock());
+  }
+
+  // test overload that has custom state after lock
+  {
+    std::atomic<uint8_t> state{0};
+    auto neverGiveUpLocking = [](auto const&) { return false; };
+    auto stateAfter = [](auto) { return uint8_t{255}; };
+    Lock l(state, neverGiveUpLocking, stateAfter, std::memory_order_acq_rel);
+    assert(l.__owns_lock());
+    assert(state.load() == 255);
+  }
+
+  // lock and unlock
+  {
+    std::atomic<uint8_t> state{0};
+    Lock l(state);
+    assert(l.__owns_lock());
+
+    l.__unlock();
+    assert(!l.__owns_lock());
+
+    l.__lock();
+    assert(l.__owns_lock());
+  }
+
+  // lock blocking
+  {
+    std::atomic<uint8_t> state{0};
+    int i = 0;
+    Lock l1(state);
+
+    auto thread1 = support::make_test_thread([&] {
+      std::this_thread::sleep_for(std::chrono::milliseconds{10});
+      i = 5;
+      l1.__unlock();
+    });
+
+    Lock l2(state);
+    // l2's lock has to wait for l1's unlocking
+    assert(i == 5);
+
+    thread1.join();
+  }
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_list.pass.cpp b/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_list.pass.cpp
new file mode 100644
--- /dev/null
+++ b/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_list.pass.cpp
@@ -0,0 +1,108 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS: -Wno-private-header
+
+#include <__stop_token/intrusive_list.h>
+#include <cassert>
+
+#include "test_macros.h"
+
+struct Node : std::__intrusive_node_base<Node> {
+  int i;
+
+  Node(int ii) : i(ii) {}
+};
+
+using List = std::__intrusive_list<Node>;
+
+int main(int, char**) {
+  // empty
+  {
+    List list;
+    assert(list.__empty());
+  }
+
+  // push_front
+  {
+    List list;
+    Node n1(5);
+    list.__push_front(&n1);
+    assert(!list.__empty());
+  }
+
+  // pop_front
+  {
+    List list;
+    Node n1(5);
+    Node n2(6);
+    list.__push_front(&n1);
+    list.__push_front(&n2);
+
+    auto f1 = list.__pop_front();
+    assert(f1->i == 6);
+
+    auto f2 = list.__pop_front();
+    assert(f2->i == 5);
+
+    assert(list.__empty());
+  }
+
+  // remove head
+  {
+    List list;
+    Node n1(5);
+    Node n2(6);
+    list.__push_front(&n1);
+    list.__push_front(&n2);
+
+    list.__remove(&n2);
+
+    auto f = list.__pop_front();
+    assert(f->i == 5);
+
+    assert(list.__empty());
+  }
+
+  // remove non-head
+  {
+    List list;
+    Node n1(5);
+    Node n2(6);
+    Node n3(7);
+    list.__push_front(&n1);
+    list.__push_front(&n2);
+    list.__push_front(&n3);
+
+    list.__remove(&n2);
+
+    auto f1 = list.__pop_front();
+    assert(f1->i == 7);
+
+    auto f2 = list.__pop_front();
+    assert(f2->i == 5);
+
+    assert(list.__empty());
+  }
+
+  // is_head
+  {
+    List list;
+    Node n1(5);
+    Node n2(6);
+    list.__push_front(&n1);
+    list.__push_front(&n2);
+
+    assert(!list.__is_head(&n1));
+    assert(list.__is_head(&n2));
+  }
+
+  return 0;
+}
diff --git a/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_shared_ptr.pass.cpp b/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_shared_ptr.pass.cpp
new file mode 100644
--- /dev/null
+++ b/libcxx/test/libcxx/thread/thread.stoptoken/intrusive_shared_ptr.pass.cpp
@@ -0,0 +1,88 @@
+//===----------------------------------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+
+// UNSUPPORTED: c++03, c++11, c++14, c++17
+// ADDITIONAL_COMPILE_FLAGS: -Wno-private-header
+
+#include <__stop_token/intrusive_shared_ptr.h>
+#include <atomic>
+#include <cassert>
+#include <utility>
+
+#include "test_macros.h"
+
+struct Object {
+  std::atomic<int> counter = 0;
+};
+
+template <>
+struct std::__intrusive_shared_ptr_traits<Object> {
+  static std::atomic<int>& __get_atomic_ref_count(Object& obj) { return obj.counter; }
+};
+
+using Ptr = std::__intrusive_shared_ptr<Object>;
+
+int main(int, char**) {
+  // default
+  {
+    Ptr ptr;
+    assert(!ptr);
+  }
+
+  // raw ptr
+  {
+    auto object = new Object;
+    Ptr ptr(object);
+    assert(ptr->counter == 1);
+  }
+
+  // copy
+  {
+    auto object = new Object;
+    Ptr ptr(object);
+    auto ptr2 = ptr;
+    assert(ptr->counter == 2);
+    assert(ptr2->counter == 2);
+  }
+
+  // move
+  {
+    auto object = new Object;
+    Ptr ptr(object);
+    auto ptr2 = std::move(ptr);
+    assert(!ptr);
+    assert(ptr2->counter == 1);
+  }
+
+  // copy assign
+  {
+    auto object1 = new Object;
+    auto object2 = new Object;
+    Ptr ptr1(object1);
+    Ptr ptr2(object2);
+
+    ptr1 = ptr2;
+    assert(ptr1->counter == 2);
+    assert(ptr2->counter == 2);
+  }
+
+  // move assign
+  {
+    auto object1 = new Object;
+    auto object2 = new Object;
+    Ptr ptr1(object1);
+    Ptr ptr2(object2);
+
+    ptr1 = std::move(ptr2);
+    assert(ptr1->counter == 1);
+    assert(!ptr2);
+  }
+
+  return 0;
+}
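Finally, a rough sketch of how the three helpers are expected to compose in a stop-token-style state object. This is purely illustrative and not the actual <stop_token> implementation; __callback, __state, __bits_, __register and __deregister are invented names.

#include <__stop_token/atomic_unique_lock.h>
#include <__stop_token/intrusive_list.h>
#include <atomic>
#include <cstdint>

struct __callback : std::__intrusive_node_base<__callback> {
  void (*__invoke_)(__callback*) = nullptr; // hypothetical payload
};

struct __state {
  static constexpr uint32_t __lock_bit = 1u << 0;

  std::atomic<uint32_t> __bits_ = 0;              // lock bit plus room for other flags
  std::__intrusive_list<__callback> __callbacks_; // nodes live inside the registered callbacks

  void __register(__callback* __cb) {
    // The lock bit serializes all list manipulation; the destructor of __lock releases it.
    std::__atomic_unique_lock<uint32_t, __lock_bit> __lock(__bits_);
    __callbacks_.__push_front(__cb);
  }

  void __deregister(__callback* __cb) {
    std::__atomic_unique_lock<uint32_t, __lock_bit> __lock(__bits_);
    __callbacks_.__remove(__cb);
  }
};

Shared ownership of such a state would then go through __intrusive_shared_ptr<__state>, with the reference counter stored as one more member of __state, as in the traits sketch shown earlier.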