|
| 1 | +//===----------------------------------------------------------------------===// |
| 2 | +// |
| 3 | +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | +// See https://llvm.org/LICENSE.txt for license information. |
| 5 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | +// |
| 7 | +//===----------------------------------------------------------------------===// |
| 8 | +#ifndef LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H |
| 9 | +#define LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H |
| 10 | + |
| 11 | +/* cxa_guard_impl.h - Implements the C++ runtime support for function local |
| 12 | + * static guards. |
| 13 | + * The layout of the guard object is the same across ARM and Itanium. |
| 14 | + * |
| 15 | + * The first "guard byte" (which is checked by the compiler) is set only upon |
| 16 | + * the completion of cxa release. |
| 17 | + * |
| 18 | + * The second "init byte" does the rest of the bookkeeping. It tracks if |
| 19 | + * initialization is complete or pending, and if there are waiting threads. |
| 20 | + * |
| 21 | + * If the guard variable is 64-bits and the platform supplies a 32-bit thread |
| 22 | + * identifier, it is used to detect recursive initialization. The thread ID of |
| 23 | + * the thread currently performing initialization is stored in the second word. |
| 24 | + * |
| 25 | + * Guard Object Layout: |
| 26 | + * ------------------------------------------------------------------------- |
| 27 | + * |a: guard byte | a+1: init byte | a+2 : unused ... | a+4: thread-id ... | |
| 28 | + * ------------------------------------------------------------------------ |
| 29 | + * |
| 30 | + * Access Protocol: |
| 31 | + * For each implementation the guard byte is checked and set before accessing |
| 32 | + * the init byte. |
| 33 | + * |
| 34 | + * Overall Design: |
| 35 | + * The implementation was designed to allow each implementation to be tested |
| 36 | + * independent of the C++ runtime or platform support. |
| 37 | + * |
| 38 | + */ |
| 39 | + |
#include "__cxxabi_config.h"
#include "include/atomic_support.h"
#include <limits.h>    // INT_MAX (futex wake count)
#include <stdint.h>    // uint8_t, uint32_t, uint64_t
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#if defined(__has_include)
# if __has_include(<sys/syscall.h>)
#  include <sys/syscall.h>
# endif
#endif

#include <cstring>     // std::memcpy
#include <__threading_support>
| 52 | + |
| 53 | +// To make testing possible, this header is included from both cxa_guard.cpp |
| 54 | +// and a number of tests. |
| 55 | +// |
| 56 | +// For this reason we place everything in an anonymous namespace -- even though |
| 57 | +// we're in a header. We want the actual implementation and the tests to have |
| 58 | +// unique definitions of the types in this header (since the tests may depend |
| 59 | +// on function local statics). |
| 60 | +// |
| 61 | +// To enforce this either `BUILDING_CXA_GUARD` or `TESTING_CXA_GUARD` must be |
| 62 | +// defined when including this file. Only `src/cxa_guard.cpp` should define |
| 63 | +// the former. |
| 64 | +#ifdef BUILDING_CXA_GUARD |
| 65 | +# include "abort_message.h" |
| 66 | +# define ABORT_WITH_MESSAGE(...) ::abort_message(__VA_ARGS__) |
| 67 | +#elif defined(TESTING_CXA_GUARD) |
| 68 | +# define ABORT_WITH_MESSAGE(...) ::abort() |
| 69 | +#else |
| 70 | +# error "Either BUILDING_CXA_GUARD or TESTING_CXA_GUARD must be defined" |
| 71 | +#endif |
| 72 | + |
| 73 | + |
| 74 | +namespace __cxxabiv1 { |
| 75 | +// Use an anonymous namespace to ensure that the tests and actual implementation |
| 76 | +// have unique definitions of these symbols. |
| 77 | +namespace { |
| 78 | + |
| 79 | +//===----------------------------------------------------------------------===// |
| 80 | +// Misc Utilities |
| 81 | +//===----------------------------------------------------------------------===// |
| 82 | + |
| 83 | +template <class T, T(*Init)()> |
| 84 | +struct LazyValue { |
| 85 | + LazyValue() : is_init(false) {} |
| 86 | + |
| 87 | + T& get() { |
| 88 | + if (!is_init) { |
| 89 | + value = Init(); |
| 90 | + is_init = true; |
| 91 | + } |
| 92 | + return value; |
| 93 | + } |
| 94 | + private: |
| 95 | + T value; |
| 96 | + bool is_init = false; |
| 97 | +}; |
| 98 | + |
| 99 | +//===----------------------------------------------------------------------===// |
| 100 | +// PlatformGetThreadID |
| 101 | +//===----------------------------------------------------------------------===// |
| 102 | + |
#if defined(__APPLE__) && defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
/// Returns a 32-bit identifier for the calling thread: the Mach port name
/// backing the current pthread.
uint32_t PlatformThreadID() {
  static_assert(sizeof(mach_port_t) == sizeof(uint32_t), "");
  return static_cast<uint32_t>(
      pthread_mach_thread_np(std::__libcpp_thread_get_current_id()));
}
#elif defined(SYS_gettid) && defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
/// Returns the Linux kernel task id for the calling thread (gettid). The
/// static_assert enforces the 32-bit width the guard layout requires.
uint32_t PlatformThreadID() {
  static_assert(sizeof(pid_t) == sizeof(uint32_t), "");
  return static_cast<uint32_t>(syscall(SYS_gettid));
}
#else
// Fallback: no 32-bit thread id is available on this platform. Callers test
// this pointer (see DoesPlatformSupportThreadID) and disable the
// recursive-initialization check when it is null.
constexpr uint32_t (*PlatformThreadID)() = nullptr;
#endif
| 117 | + |
| 118 | + |
/// Returns true when this platform defines a real PlatformThreadID function
/// above (rather than the null-function-pointer fallback).
constexpr bool DoesPlatformSupportThreadID() {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-pointer-compare"
#endif
  // Unary `+` decays PlatformThreadID to a plain function pointer when it is
  // a real function, so this comparison is well-formed in both
  // configurations. The pragmas silence the (expected) tautological-compare
  // warning in the function case.
  return +PlatformThreadID != nullptr;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
}
| 129 | + |
| 130 | +//===----------------------------------------------------------------------===// |
| 131 | +// GuardBase |
| 132 | +//===----------------------------------------------------------------------===// |
| 133 | + |
/// Result of __cxa_guard_acquire: either some thread has already completed
/// the initialization, or the calling thread has claimed it and must perform
/// it (followed by __cxa_guard_release or __cxa_guard_abort).
enum class AcquireResult {
  INIT_IS_DONE,
  INIT_IS_PENDING,
};
// Unscoped aliases so implementations can return the values tersely.
constexpr AcquireResult INIT_IS_DONE = AcquireResult::INIT_IS_DONE;
constexpr AcquireResult INIT_IS_PENDING = AcquireResult::INIT_IS_PENDING;

// States stored in the "init byte" (see the file comment for the layout).
static constexpr uint8_t UNSET = 0;                  // No init attempted, or a previous attempt aborted.
static constexpr uint8_t COMPLETE_BIT = (1 << 0);    // Initialization finished successfully.
static constexpr uint8_t PENDING_BIT = (1 << 1);     // A thread is currently performing initialization.
static constexpr uint8_t WAITING_BIT = (1 << 2);     // At least one other thread is blocked waiting.
| 145 | + |
/// GuardObject - CRTP base class shared by every init-byte implementation.
/// It decomposes the raw guard address into the guard byte, init byte, and
/// (for 64-bit guards) thread-id word, and implements the common fast paths;
/// the Derived class supplies {acquire,release,abort}_init_byte.
template <class Derived>
struct GuardObject {
  GuardObject() = delete;
  GuardObject(GuardObject const&) = delete;
  GuardObject& operator=(GuardObject const&) = delete;

  /// ARM-style 32-bit guard: there is no room for a thread id, so
  /// thread_id_address is null.
  explicit GuardObject(uint32_t* g)
      : base_address(g), guard_byte_address(reinterpret_cast<uint8_t*>(g)),
        init_byte_address(reinterpret_cast<uint8_t*>(g) + 1),
        thread_id_address(nullptr) {}

  /// Itanium-style 64-bit guard: the second 32-bit word holds the id of the
  /// thread performing initialization (when the platform supplies one).
  explicit GuardObject(uint64_t* g)
      : base_address(g), guard_byte_address(reinterpret_cast<uint8_t*>(g)),
        init_byte_address(reinterpret_cast<uint8_t*>(g) + 1),
        thread_id_address(reinterpret_cast<uint32_t*>(g) + 1) {}

public:
  /// Implements __cxa_guard_acquire. Fast path: a single acquire load of the
  /// guard byte; only when it is not yet COMPLETE_BIT do we fall through to
  /// the Derived implementation's slow path.
  AcquireResult cxa_guard_acquire() {
    AtomicInt<uint8_t> guard_byte(guard_byte_address);
    if (guard_byte.load(std::_AO_Acquire) == COMPLETE_BIT)
      return INIT_IS_DONE;
    return derived()->acquire_init_byte();
  }

  /// Implements __cxa_guard_release
  void cxa_guard_release() {
    AtomicInt<uint8_t> guard_byte(guard_byte_address);
    // Store complete first, so that when release wakes other folks, they see
    // it as having been completed.
    guard_byte.store(COMPLETE_BIT, std::_AO_Release);
    derived()->release_init_byte();
  }

  /// Implements __cxa_guard_abort
  void cxa_guard_abort() { derived()->abort_init_byte(); }

public:
  /// base_address - the address of the original guard object.
  void* const base_address;
  /// The address of the guard byte at offset 0.
  uint8_t* const guard_byte_address;
  /// The address of the byte used by the implementation during initialization.
  uint8_t* const init_byte_address;
  /// An optional address storing an identifier for the thread performing initialization.
  /// It's used to detect recursive initialization.
  uint32_t* const thread_id_address;

private:
  Derived* derived() { return static_cast<Derived*>(this); }
};
| 197 | + |
| 198 | +//===----------------------------------------------------------------------===// |
| 199 | +// Single Threaded Implementation |
| 200 | +//===----------------------------------------------------------------------===// |
| 201 | + |
| 202 | +struct InitByteNoThreads : GuardObject<InitByteNoThreads> { |
| 203 | + using GuardObject::GuardObject; |
| 204 | + |
| 205 | + AcquireResult acquire_init_byte() { |
| 206 | + if (*init_byte_address == COMPLETE_BIT) |
| 207 | + return INIT_IS_DONE; |
| 208 | + if (*init_byte_address & PENDING_BIT) |
| 209 | + ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization"); |
| 210 | + *init_byte_address = PENDING_BIT; |
| 211 | + return INIT_IS_PENDING; |
| 212 | + } |
| 213 | + |
| 214 | + void release_init_byte() { *init_byte_address = COMPLETE_BIT; } |
| 215 | + void abort_init_byte() { *init_byte_address = UNSET; } |
| 216 | +}; |
| 217 | + |
| 218 | + |
| 219 | +//===----------------------------------------------------------------------===// |
| 220 | +// Global Mutex Implementation |
| 221 | +//===----------------------------------------------------------------------===// |
| 222 | + |
// Forward declarations so the names can appear in template parameter lists
// even when the threaded definitions below are compiled out.
struct LibcppMutex;
struct LibcppCondVar;

#ifndef _LIBCXXABI_HAS_NO_THREADS
/// Thin wrapper over libc++'s raw mutex API, shaped so it can be supplied as
/// a template parameter to InitByteGlobalMutex (and replaced by a mock in
/// the tests).
struct LibcppMutex {
  LibcppMutex() = default;
  LibcppMutex(LibcppMutex const&) = delete;
  LibcppMutex& operator=(LibcppMutex const&) = delete;

  // NOTE: the underlying __libcpp functions return an error code, so a
  // `true` result here means FAILURE — see LockGuard below, which aborts
  // when lock()/unlock() return true.
  bool lock() { return std::__libcpp_mutex_lock(&mutex); }
  bool unlock() { return std::__libcpp_mutex_unlock(&mutex); }

private:
  friend struct LibcppCondVar;
  std::__libcpp_mutex_t mutex = _LIBCPP_MUTEX_INITIALIZER;
};

/// Condition-variable wrapper; same true-means-failure convention as
/// LibcppMutex (release_init_byte aborts when broadcast() returns true).
struct LibcppCondVar {
  LibcppCondVar() = default;
  LibcppCondVar(LibcppCondVar const&) = delete;
  LibcppCondVar& operator=(LibcppCondVar const&) = delete;

  /// Atomically release `mut` and sleep until notified; `mut` is re-held on
  /// return.
  bool wait(LibcppMutex& mut) {
    return std::__libcpp_condvar_wait(&cond, &mut.mutex);
  }
  bool broadcast() { return std::__libcpp_condvar_broadcast(&cond); }

private:
  std::__libcpp_condvar_t cond = _LIBCPP_CONDVAR_INITIALIZER;
};
#endif // !defined(_LIBCXXABI_HAS_NO_THREADS)
| 254 | + |
/// InitByteGlobalMutex - Manages initialization using a single process-wide
/// mutex and condition variable. Every access to the init byte happens while
/// holding `global_mutex`, so plain loads and stores suffice; waiters block
/// on `global_cond` and are woken by broadcast from release/abort.
template <class Mutex, class CondVar, Mutex& global_mutex, CondVar& global_cond,
          uint32_t (*GetThreadID)() = PlatformThreadID>
struct InitByteGlobalMutex
    : GuardObject<InitByteGlobalMutex<Mutex, CondVar, global_mutex, global_cond,
                                      GetThreadID>> {

  using BaseT = typename InitByteGlobalMutex::GuardObject;
  using BaseT::BaseT;

  /// ARM (32-bit) guard: no thread-id word, so recursive initialization
  /// cannot be detected.
  explicit InitByteGlobalMutex(uint32_t *g)
      : BaseT(g), has_thread_id_support(false) {}
  /// Itanium (64-bit) guard: recursion detection is enabled when the
  /// platform can supply a 32-bit thread id.
  explicit InitByteGlobalMutex(uint64_t *g)
      : BaseT(g), has_thread_id_support(DoesPlatformSupportThreadID()) {}

public:
  /// Claim the init byte, blocking while another thread holds it.
  AcquireResult acquire_init_byte() {
    LockGuard g("__cxa_guard_acquire");
    // Check for possible recursive initialization.
    if (has_thread_id_support && (*init_byte_address & PENDING_BIT)) {
      if (*thread_id_address == current_thread_id.get())
        ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization");
    }

    // Wait until the pending bit is not set.
    while (*init_byte_address & PENDING_BIT) {
      // Record that someone is waiting so release/abort knows to broadcast.
      *init_byte_address |= WAITING_BIT;
      global_cond.wait(global_mutex);
    }

    if (*init_byte_address == COMPLETE_BIT)
      return INIT_IS_DONE;

    // Claim the pending state; publish our id for recursion detection.
    if (has_thread_id_support)
      *thread_id_address = current_thread_id.get();

    *init_byte_address = PENDING_BIT;
    return INIT_IS_PENDING;
  }

  /// Mark initialization complete and wake any waiters. The broadcast is
  /// performed after dropping the lock.
  void release_init_byte() {
    bool has_waiting;
    {
      LockGuard g("__cxa_guard_release");
      has_waiting = *init_byte_address & WAITING_BIT;
      *init_byte_address = COMPLETE_BIT;
    }
    if (has_waiting) {
      if (global_cond.broadcast()) {
        ABORT_WITH_MESSAGE("%s failed to broadcast", "__cxa_guard_release");
      }
    }
  }

  /// Initialization threw: clear the thread id and reset the init byte to
  /// UNSET so a woken waiter can retry the initialization itself.
  void abort_init_byte() {
    bool has_waiting;
    {
      LockGuard g("__cxa_guard_abort");
      if (has_thread_id_support)
        *thread_id_address = 0;
      has_waiting = *init_byte_address & WAITING_BIT;
      *init_byte_address = UNSET;
    }
    if (has_waiting) {
      if (global_cond.broadcast()) {
        ABORT_WITH_MESSAGE("%s failed to broadcast", "__cxa_guard_abort");
      }
    }
  }

private:
  using BaseT::init_byte_address;
  using BaseT::thread_id_address;
  const bool has_thread_id_support;
  // Lazily fetched so the (possibly syscall-based) thread id is computed at
  // most once per guard operation, and only when actually needed.
  LazyValue<uint32_t, GetThreadID> current_thread_id;

private:
  /// RAII lock over `global_mutex`. Lock/unlock failures are fatal: the
  /// message identifies which __cxa_guard_* entry point was executing.
  struct LockGuard {
    LockGuard() = delete;
    LockGuard(LockGuard const&) = delete;
    LockGuard& operator=(LockGuard const&) = delete;

    explicit LockGuard(const char* calling_func)
        : calling_func(calling_func) {
      if (global_mutex.lock())
        ABORT_WITH_MESSAGE("%s failed to acquire mutex", calling_func);
    }

    ~LockGuard() {
      if (global_mutex.unlock())
        ABORT_WITH_MESSAGE("%s failed to release mutex", calling_func);
    }

  private:
    const char* const calling_func;
  };
};
| 352 | + |
| 353 | +//===----------------------------------------------------------------------===// |
| 354 | +// Futex Implementation |
| 355 | +//===----------------------------------------------------------------------===// |
| 356 | + |
#if defined(SYS_futex)
/// FUTEX_WAIT: atomically check that *addr == expect and, if so, sleep until
/// woken by PlatformFutexWake (spurious wakeups are possible; callers loop).
/// The trailing 0 is a null timeout pointer, i.e. wait indefinitely.
void PlatformFutexWait(int* addr, int expect) {
  constexpr int WAIT = 0; // FUTEX_WAIT
  syscall(SYS_futex, addr, WAIT, expect, 0);
}
/// FUTEX_WAKE: wake every thread (up to INT_MAX) blocked waiting on `addr`.
void PlatformFutexWake(int* addr) {
  constexpr int WAKE = 1; // FUTEX_WAKE
  syscall(SYS_futex, addr, WAKE, INT_MAX);
}
#else
// No futex syscall on this platform; DoesPlatformSupportFutex() below will
// report false and the static_assert at the bottom of the file prevents the
// futex implementation from being selected.
constexpr void (*PlatformFutexWait)(int*, int) = nullptr;
constexpr void (*PlatformFutexWake)(int*) = nullptr;
#endif
| 370 | + |
/// Returns true when real futex wait/wake functions are defined above
/// (rather than the null-function-pointer fallbacks).
constexpr bool DoesPlatformSupportFutex() {
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wtautological-pointer-compare"
#endif
  // Unary `+` decays the function to a pointer in the SYS_futex case; the
  // pragmas silence the expected tautological-compare warning there.
  return +PlatformFutexWait != nullptr;
#ifdef __clang__
#pragma clang diagnostic pop
#endif
}
| 381 | + |
| 382 | +/// InitByteFutex - Manages initialization using atomics and the futex syscall |
| 383 | +/// for waiting and waking. |
| 384 | +template <void (*Wait)(int*, int) = PlatformFutexWait, |
| 385 | + void (*Wake)(int*) = PlatformFutexWake, |
| 386 | + uint32_t (*GetThreadIDArg)() = PlatformThreadID> |
| 387 | +struct InitByteFutex : GuardObject<InitByteFutex<Wait, Wake, GetThreadIDArg>> { |
| 388 | + using BaseT = typename InitByteFutex::GuardObject; |
| 389 | + |
| 390 | + /// ARM Constructor |
| 391 | + explicit InitByteFutex(uint32_t *g) : BaseT(g), |
| 392 | + init_byte(this->init_byte_address), |
| 393 | + has_thread_id_support(this->thread_id_address && GetThreadIDArg), |
| 394 | + thread_id(this->thread_id_address) {} |
| 395 | + |
| 396 | + /// Itanium Constructor |
| 397 | + explicit InitByteFutex(uint64_t *g) : BaseT(g), |
| 398 | + init_byte(this->init_byte_address), |
| 399 | + has_thread_id_support(this->thread_id_address && GetThreadIDArg), |
| 400 | + thread_id(this->thread_id_address) {} |
| 401 | + |
| 402 | +public: |
| 403 | + AcquireResult acquire_init_byte() { |
| 404 | + while (true) { |
| 405 | + uint8_t last_val = UNSET; |
| 406 | + if (init_byte.compare_exchange(&last_val, PENDING_BIT, std::_AO_Acq_Rel, |
| 407 | + std::_AO_Acquire)) { |
| 408 | + if (has_thread_id_support) { |
| 409 | + thread_id.store(current_thread_id.get(), std::_AO_Relaxed); |
| 410 | + } |
| 411 | + return INIT_IS_PENDING; |
| 412 | + } |
| 413 | + |
| 414 | + if (last_val == COMPLETE_BIT) |
| 415 | + return INIT_IS_DONE; |
| 416 | + |
| 417 | + if (last_val & PENDING_BIT) { |
| 418 | + |
| 419 | + // Check for recursive initialization |
| 420 | + if (has_thread_id_support && thread_id.load(std::_AO_Relaxed) == current_thread_id.get()) { |
| 421 | + ABORT_WITH_MESSAGE("__cxa_guard_acquire detected recursive initialization"); |
| 422 | + } |
| 423 | + |
| 424 | + if ((last_val & WAITING_BIT) == 0) { |
| 425 | + // This compare exchange can fail for several reasons |
| 426 | + // (1) another thread finished the whole thing before we got here |
| 427 | + // (2) another thread set the waiting bit we were trying to thread |
| 428 | + // (3) another thread had an exception and failed to finish |
| 429 | + if (!init_byte.compare_exchange(&last_val, PENDING_BIT | WAITING_BIT, |
| 430 | + std::_AO_Acq_Rel, std::_AO_Release)) { |
| 431 | + // (1) success, via someone else's work! |
| 432 | + if (last_val == COMPLETE_BIT) |
| 433 | + return INIT_IS_DONE; |
| 434 | + |
| 435 | + // (3) someone else, bailed on doing the work, retry from the start! |
| 436 | + if (last_val == UNSET) |
| 437 | + continue; |
| 438 | + |
| 439 | + // (2) the waiting bit got set, so we are happy to keep waiting |
| 440 | + } |
| 441 | + } |
| 442 | + wait_on_initialization(); |
| 443 | + } |
| 444 | + } |
| 445 | + } |
| 446 | + |
| 447 | + void release_init_byte() { |
| 448 | + uint8_t old = init_byte.exchange(COMPLETE_BIT, std::_AO_Acq_Rel); |
| 449 | + if (old & WAITING_BIT) |
| 450 | + wake_all(); |
| 451 | + } |
| 452 | + |
| 453 | + void abort_init_byte() { |
| 454 | + if (has_thread_id_support) |
| 455 | + thread_id.store(0, std::_AO_Relaxed); |
| 456 | + |
| 457 | + uint8_t old = init_byte.exchange(0, std::_AO_Acq_Rel); |
| 458 | + if (old & WAITING_BIT) |
| 459 | + wake_all(); |
| 460 | + } |
| 461 | + |
| 462 | +private: |
| 463 | + /// Use the futex to wait on the current guard variable. Futex expects a |
| 464 | + /// 32-bit 4-byte aligned address as the first argument, so we have to use use |
| 465 | + /// the base address of the guard variable (not the init byte). |
| 466 | + void wait_on_initialization() { |
| 467 | + Wait(static_cast<int*>(this->base_address), |
| 468 | + expected_value_for_futex(PENDING_BIT | WAITING_BIT)); |
| 469 | + } |
| 470 | + void wake_all() { Wake(static_cast<int*>(this->base_address)); } |
| 471 | + |
| 472 | +private: |
| 473 | + AtomicInt<uint8_t> init_byte; |
| 474 | + |
| 475 | + const bool has_thread_id_support; |
| 476 | + // Unsafe to use unless has_thread_id_support |
| 477 | + AtomicInt<uint32_t> thread_id; |
| 478 | + LazyValue<uint32_t, GetThreadIDArg> current_thread_id; |
| 479 | + |
| 480 | + /// Create the expected integer value for futex `wait(int* addr, int expected)`. |
| 481 | + /// We pass the base address as the first argument, So this function creates |
| 482 | + /// an zero-initialized integer with `b` copied at the correct offset. |
| 483 | + static int expected_value_for_futex(uint8_t b) { |
| 484 | + int dest_val = 0; |
| 485 | + std::memcpy(reinterpret_cast<char*>(&dest_val) + 1, &b, 1); |
| 486 | + return dest_val; |
| 487 | + } |
| 488 | + |
| 489 | + static_assert(Wait != nullptr && Wake != nullptr, ""); |
| 490 | +}; |
| 491 | + |
| 492 | +//===----------------------------------------------------------------------===// |
| 493 | +// |
| 494 | +//===----------------------------------------------------------------------===// |
| 495 | + |
/// GlobalStatic - Provides a single shared instance of `T` per
/// instantiation, used for the global mutex and condition variable.
/// _LIBCPP_SAFE_STATIC marks the definition as requiring constant
/// initialization (presumably so it is usable before dynamic initializers
/// run — see the macro's definition in libc++).
template <class T>
struct GlobalStatic {
  static T instance;
};
template <class T>
_LIBCPP_SAFE_STATIC T GlobalStatic<T>::instance = {};
| 502 | + |
/// The available strategies for managing the init byte.
enum class Implementation {
  NoThreads,
  GlobalLock,
  Futex
};

/// Maps an Implementation enumerator to its concrete guard type.
template <Implementation Impl>
struct SelectImplementation;

template <>
struct SelectImplementation<Implementation::NoThreads> {
  using type = InitByteNoThreads;
};

template <>
struct SelectImplementation<Implementation::GlobalLock> {
  // Bind the global-lock implementation to the process-wide mutex/condvar
  // instances provided by GlobalStatic.
  using type = InitByteGlobalMutex<
      LibcppMutex, LibcppCondVar, GlobalStatic<LibcppMutex>::instance,
      GlobalStatic<LibcppCondVar>::instance, PlatformThreadID>;
};

template <>
struct SelectImplementation<Implementation::Futex> {
  using type =
      InitByteFutex<PlatformFutexWait, PlatformFutexWake, PlatformThreadID>;
};
| 529 | + |
// TODO(EricWF): We should prefer the futex implementation when available. But
// it should be done in a separate step from adding the implementation.
constexpr Implementation CurrentImplementation =
#if defined(_LIBCXXABI_HAS_NO_THREADS)
  Implementation::NoThreads;
#elif defined(_LIBCXXABI_USE_FUTEX)
  Implementation::Futex;
#else
  Implementation::GlobalLock;
#endif

// Reject configurations that request the futex implementation on a platform
// where SYS_futex was not detected above.
static_assert(CurrentImplementation != Implementation::Futex
              || DoesPlatformSupportFutex(), "Futex selected but not supported");

/// The guard type actually used by __cxa_guard_{acquire,release,abort}.
using SelectedImplementation =
    SelectImplementation<CurrentImplementation>::type;
| 546 | + |
| 547 | +} // end namespace |
| 548 | +} // end namespace __cxxabiv1 |
| 549 | + |
| 550 | +#endif // LIBCXXABI_SRC_INCLUDE_CXA_GUARD_IMPL_H |
0 commit comments