diff --git a/compiler-rt/lib/scudo/standalone/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
--- a/compiler-rt/lib/scudo/standalone/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/CMakeLists.txt
@@ -61,6 +61,9 @@
   bytemap.h
   checksum.h
   chunk.h
+  condition_variable.h
+  condition_variable_base.h
+  condition_variable_linux.h
   combined.h
   common.h
   flags_parser.h
@@ -102,6 +105,7 @@ set(SCUDO_SOURCES
   checksum.cpp
   common.cpp
+  condition_variable_linux.cpp
   crc32_hw.cpp
   flags_parser.cpp
   flags.cpp
diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -11,6 +11,7 @@
 
 #include "combined.h"
 #include "common.h"
+#include "condition_variable.h"
 #include "flags.h"
 #include "primary32.h"
 #include "primary64.h"
@@ -82,6 +83,14 @@
 //   // Defines the minimal & maximal release interval that can be set.
 //   static const s32 MinReleaseToOsIntervalMs = INT32_MIN;
 //   static const s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+//
+//   // Use a condition variable to shorten the waiting time when refilling
+//   // the freelist. Note that this depends on each platform's implementation
+//   // of the condition variable, so the performance may vary and a benefit
+//   // is not guaranteed.
+//   // Note that both fields have to be defined to enable it.
+//   static const bool UseConditionVariable = true;
+//   using ConditionVariableT = ConditionVariableLinux;
 // };
 // // Defines the type of Primary allocator to use.
 // template <typename Config> using PrimaryT = SizeClassAllocator64<Config>;
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable.h b/compiler-rt/lib/scudo/standalone/condition_variable.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable.h
@@ -0,0 +1,60 @@
+//===-- condition_variable.h ------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_H_
+#define SCUDO_CONDITION_VARIABLE_H_
+
+#include "condition_variable_base.h"
+
+#include "common.h"
+#include "platform.h"
+
+#include "condition_variable_linux.h"
+
+namespace scudo {
+
+// A default implementation of the condition variable. It doesn't do a real
+// `wait`; instead, it only spins for a short amount of time.
+class ConditionVariableDummy
+    : public ConditionVariableBase<ConditionVariableDummy> {
+public:
+  void notifyAllImpl(UNUSED HybridMutex &M) REQUIRES(M) {}
+
+  void waitImpl(UNUSED HybridMutex &M) REQUIRES(M) {
+    M.unlock();
+
+    constexpr u32 SpinTimes = 64;
+    volatile u32 V = 0;
+    for (u32 I = 0; I < SpinTimes; ++I) {
+      u32 Tmp = V + 1;
+      V = Tmp;
+    }
+
+    M.lock();
+  }
+};
+
+template <typename Config, typename = const bool>
+struct ConditionVariableState {
+  static constexpr bool enabled() { return false; }
+  // This is only used for compilation purposes, so that we won't end up with
+  // many conditional compilations. If you want to use `ConditionVariableDummy`,
+  // define `ConditionVariableT` in your allocator configuration. See
+  // allocator_config.h for more details.
+  using ConditionVariableT = ConditionVariableDummy;
+};
+
+template <typename Config>
+struct ConditionVariableState<Config, decltype(Config::UseConditionVariable)> {
+  static constexpr bool enabled() { return true; }
+  using ConditionVariableT = typename Config::ConditionVariableT;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_H_
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_base.h b/compiler-rt/lib/scudo/standalone/condition_variable_base.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_base.h
@@ -0,0 +1,57 @@
+//===-- condition_variable_base.h ------------------------------------*- C++
+//-*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_BASE_H_
+#define SCUDO_CONDITION_VARIABLE_BASE_H_
+
+#include "mutex.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+template <typename Derived> class ConditionVariableBase {
+public:
+  constexpr ConditionVariableBase() = default;
+
+  void bindTestOnly(HybridMutex &Mutex) {
+#if SCUDO_DEBUG
+    boundMutex = &Mutex;
+#else
+    (void)Mutex;
+#endif
+  }
+
+  void notifyAll(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+    CHECK_EQ(&M, boundMutex);
+#endif
+    getDerived()->notifyAllImpl(M);
+  }
+
+  void wait(HybridMutex &M) REQUIRES(M) {
+#if SCUDO_DEBUG
+    CHECK_EQ(&M, boundMutex);
+#endif
+    getDerived()->waitImpl(M);
+  }
+
+protected:
+  Derived *getDerived() { return static_cast<Derived *>(this); }
+
+#if SCUDO_DEBUG
+  // Because thread-safety analysis doesn't support pointer aliasing, we are
+  // not able to mark the proper annotations without false positives. Instead,
+  // we pass the lock and do the same-lock check separately.
+  HybridMutex *boundMutex = nullptr;
+#endif
+};
+
+} // namespace scudo
+
+#endif // SCUDO_CONDITION_VARIABLE_BASE_H_
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_linux.h b/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_linux.h
@@ -0,0 +1,38 @@
+//===-- condition_variable_linux.h ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_CONDITION_VARIABLE_LINUX_H_
+#define SCUDO_CONDITION_VARIABLE_LINUX_H_
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "atomic_helpers.h"
+#include "condition_variable_base.h"
+#include "thread_annotations.h"
+
+namespace scudo {
+
+class ConditionVariableLinux
+    : public ConditionVariableBase<ConditionVariableLinux> {
+public:
+  void notifyAllImpl(HybridMutex &M) REQUIRES(M);
+
+  void waitImpl(HybridMutex &M) REQUIRES(M);
+
+private:
+  u32 LastNotifyAll = 0;
+  atomic_u32 Counter = {};
+};
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
+
+#endif // SCUDO_CONDITION_VARIABLE_LINUX_H_
diff --git a/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp b/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/condition_variable_linux.cpp
@@ -0,0 +1,52 @@
+//===-- condition_variable_linux.cpp ----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "platform.h"
+
+#if SCUDO_LINUX
+
+#include "condition_variable_linux.h"
+
+#include "atomic_helpers.h"
+
+#include <limits.h>
+#include <linux/futex.h>
+#include <sys/syscall.h>
+#include <unistd.h>
+
+namespace scudo {
+
+void ConditionVariableLinux::notifyAllImpl(UNUSED HybridMutex &M) {
+  const u32 V = atomic_load_relaxed(&Counter) + 1;
+  atomic_store_relaxed(&Counter, V);
+
+  // TODO(chiahungduan): Move the waiters from the futex waiting queue
+  // `Counter` to futex waiting queue `M` so that the awoken threads won't be
+  // blocked again because `M` is still locked by the current thread.
+  if (LastNotifyAll + 1 != V) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAKE_PRIVATE,
+            INT_MAX, nullptr, nullptr, 0);
+  }
+
+  LastNotifyAll = V;
+}
+
+void ConditionVariableLinux::waitImpl(HybridMutex &M) {
+  const u32 V = atomic_load_relaxed(&Counter) + 1;
+  atomic_store_relaxed(&Counter, V);
+
+  // TODO: Use ScopedUnlock when it's supported.
+  M.unlock();
+  syscall(SYS_futex, reinterpret_cast<uptr>(&Counter), FUTEX_WAIT_PRIVATE, V,
+          nullptr, nullptr, 0);
+  M.lock();
+}
+
+} // namespace scudo
+
+#endif // SCUDO_LINUX
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -21,6 +21,8 @@
 #include "string_utils.h"
 #include "thread_annotations.h"
 
+#include "condition_variable.h"
+
 namespace scudo {
 
 // SizeClassAllocator64 is an allocator tuned for 64-bit address space.
@@ -47,6 +49,8 @@
 public:
   typedef typename Config::Primary::CompactPtrT CompactPtrT;
   typedef typename Config::Primary::SizeClassMap SizeClassMap;
+  typedef typename ConditionVariableState<
+      typename Config::Primary>::ConditionVariableT ConditionVariableT;
   static const uptr CompactPtrScale = Config::Primary::CompactPtrScale;
   static const uptr RegionSizeLog = Config::Primary::RegionSizeLog;
   static const uptr GroupSizeLog = Config::Primary::GroupSizeLog;
@@ -66,6 +70,10 @@
   static bool canAllocate(uptr Size) { return Size <= SizeClassMap::MaxSize; }
 
+  static bool conditionVariableEnabled() {
+    return ConditionVariableState<typename Config::Primary>::enabled();
+  }
+
   void init(s32 ReleaseToOsInterval) NO_THREAD_SAFETY_ANALYSIS {
     DCHECK(isAligned(reinterpret_cast<uptr>(this), alignof(ThisT)));
@@ -120,6 +128,7 @@
     for (uptr I = 0; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
+
       // The actual start of a region is offset by a random number of pages
       // when PrimaryEnableRandomOffset is set.
       Region->RegionBeg = (PrimaryBase + (I << RegionSizeLog)) +
@@ -141,6 +150,11 @@
     }
     shuffle(RegionInfoArray, NumClasses, &Seed);
 
+    // The binding should be done after region shuffling so that it won't bind
+    // the FLLock from the wrong region.
+    for (uptr I = 0; I < NumClasses; I++)
+      getRegionInfo(I)->FLLockCV.bindTestOnly(getRegionInfo(I)->FLLock);
+
     setOption(Option::ReleaseInterval, static_cast<s32>(ReleaseToOsInterval));
   }
@@ -217,26 +231,26 @@
     bool PrintStats = false;
     TransferBatch *B = nullptr;
-    while (true) {
-      // When two threads compete for `Region->MMLock`, we only want one of them
-      // does the populateFreeListAndPopBatch(). To avoid both of them doing
-      // that, always check the freelist before mapping new pages.
-      //
-      // TODO(chiahungduan): Use a condition variable so that we don't need to
-      // hold `Region->MMLock` here.
-      ScopedLock ML(Region->MMLock);
-      {
-        ScopedLock FL(Region->FLLock);
-        B = popBatchImpl(C, ClassId, Region);
-        if (LIKELY(B))
-          return B;
-      }
+    if (conditionVariableEnabled()) {
+      B = popBatchWithCV(C, ClassId, Region, PrintStats);
+    } else {
+      while (true) {
+        // When two threads compete for `Region->MMLock`, we only want one of
+        // them to call populateFreeListAndPopBatch(). To avoid both of them
+        // doing that, always check the freelist before mapping new pages.
+        ScopedLock ML(Region->MMLock);
+        {
+          ScopedLock FL(Region->FLLock);
+          if ((B = popBatchImpl(C, ClassId, Region)))
+            break;
+        }
 
-      const bool RegionIsExhausted = Region->Exhausted;
-      if (!RegionIsExhausted)
-        B = populateFreeListAndPopBatch(C, ClassId, Region);
-      PrintStats = !RegionIsExhausted && Region->Exhausted;
-      break;
+        const bool RegionIsExhausted = Region->Exhausted;
+        if (!RegionIsExhausted)
+          B = populateFreeListAndPopBatch(C, ClassId, Region);
+        PrintStats = !RegionIsExhausted && Region->Exhausted;
+        break;
+      }
     }
 
     // Note that `getStats()` requires locking each region so we can't call it
@@ -267,6 +281,8 @@
     if (ClassId == SizeClassMap::BatchClassId) {
       ScopedLock L(Region->FLLock);
       pushBatchClassBlocks(Region, Array, Size);
+      if (conditionVariableEnabled())
+        Region->FLLockCV.notifyAll(Region->FLLock);
       return;
     }
@@ -293,6 +309,8 @@
     {
       ScopedLock L(Region->FLLock);
       pushBlocksImpl(C, ClassId, Region, Array, Size, SameGroup);
+      if (conditionVariableEnabled())
+        Region->FLLockCV.notifyAll(Region->FLLock);
     }
   }
@@ -525,6 +543,7 @@
   struct UnpaddedRegionInfo {
     // Mutex for operations on freelist
     HybridMutex FLLock;
+    ConditionVariableT FLLockCV GUARDED_BY(FLLock);
    // Mutex for memmap operations
    HybridMutex MMLock ACQUIRED_BEFORE(FLLock);
    // `RegionBeg` is initialized before thread creation and won't be changed.
@@ -536,6 +555,7 @@
     uptr TryReleaseThreshold GUARDED_BY(MMLock) = 0;
     ReleaseToOsInfo ReleaseInfo GUARDED_BY(MMLock) = {};
     bool Exhausted GUARDED_BY(MMLock) = false;
+    bool isPopulatingFreeList GUARDED_BY(FLLock) = false;
   };
   struct RegionInfo : UnpaddedRegionInfo {
     char Padding[SCUDO_CACHE_LINE_SIZE -
@@ -818,6 +838,76 @@
     InsertBlocks(Cur, Array + Size - Count, Count);
   }
 
+  TransferBatch *popBatchWithCV(CacheT *C, uptr ClassId, RegionInfo *Region,
+                                bool &PrintStats) {
+    TransferBatch *B = nullptr;
+
+    while (true) {
+      // We only expect one thread to refill the freelist; the other threads
+      // will wait for either the completion of
+      // `populateFreeListAndPopBatch()` or a `pushBlocks()` call from
+      // another thread.
+      bool PopulateFreeList = false;
+      {
+        ScopedLock FL(Region->FLLock);
+        if (!Region->isPopulatingFreeList) {
+          Region->isPopulatingFreeList = true;
+          PopulateFreeList = true;
+        }
+      }
+
+      if (PopulateFreeList) {
+        ScopedLock ML(Region->MMLock);
+
+        const bool RegionIsExhausted = Region->Exhausted;
+        if (!RegionIsExhausted)
+          B = populateFreeListAndPopBatch(C, ClassId, Region);
+        PrintStats = !RegionIsExhausted && Region->Exhausted;
+
+        {
+          // Before reacquiring the `FLLock`, the freelist may be used up
+          // again and some threads are waiting for the freelist refill by
+          // the current thread. It's important to set
+          // `Region->isPopulatingFreeList` to false so the threads about to
+          // sleep will notice the status change.
+          ScopedLock FL(Region->FLLock);
+          Region->isPopulatingFreeList = false;
+          Region->FLLockCV.notifyAll(Region->FLLock);
+        }
+
+        break;
+      }
+
+      // At this point, two preconditions have to be met before waiting:
+      // 1. The freelist is empty.
+      // 2. Region->isPopulatingFreeList == true, i.e., someone is still doing
+      //    `populateFreeListAndPopBatch()`.
+      //
+      // Note that the freelist may still be empty while
+      // Region->isPopulatingFreeList == false, because all the newly
+      // populated blocks were used up right after the refill. Therefore, we
+      // have to check whether someone is still populating the freelist.
+      ScopedLock FL(Region->FLLock);
+      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+        break;
+
+      if (!Region->isPopulatingFreeList)
+        continue;
+
+      // Now the freelist is empty and someone is doing the refill. We will
+      // wait until either the freelist is refilled or the refilling thread
+      // finishes `populateFreeListAndPopBatch()`. The refill can be done by
+      // `populateFreeListAndPopBatch()`, `pushBlocks()`,
+      // `pushBatchClassBlocks()` and `mergeGroupsToReleaseBack()`.
+      Region->FLLockCV.wait(Region->FLLock);
+
+      if (LIKELY(B = popBatchImpl(C, ClassId, Region)))
+        break;
+    }
+
+    return B;
+  }
+
   // Pop one TransferBatch from a BatchGroup. The BatchGroup with the smallest
   // group id will be considered first.
   //
@@ -1508,6 +1598,8 @@
       if (UNLIKELY(Idx + NeededSlots > MaxUnusedSize)) {
         ScopedLock L(BatchClassRegion->FLLock);
         pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+        if (conditionVariableEnabled())
+          BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
         Idx = 0;
       }
       Blocks[Idx++] =
@@ -1543,6 +1635,8 @@
     if (Idx != 0) {
       ScopedLock L(BatchClassRegion->FLLock);
       pushBatchClassBlocks(BatchClassRegion, Blocks, Idx);
+      if (conditionVariableEnabled())
+        BatchClassRegion->FLLockCV.notifyAll(BatchClassRegion->FLLock);
     }
 
     if (SCUDO_DEBUG) {
@@ -1552,6 +1646,9 @@
         CHECK_LT(Prev->CompactPtrGroupBase, Cur->CompactPtrGroupBase);
       }
     }
+
+    if (conditionVariableEnabled())
+      Region->FLLockCV.notifyAll(Region->FLLock);
   }
 
   // TODO: `PrimaryBase` can be obtained from ReservedMemory. This needs to be
diff --git a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
--- a/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
+++ b/compiler-rt/lib/scudo/standalone/tests/CMakeLists.txt
@@ -96,6 +96,7 @@
   chunk_test.cpp
   combined_test.cpp
   common_test.cpp
+  condition_variable_test.cpp
   flags_test.cpp
   list_test.cpp
   map_test.cpp
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -12,7 +12,9 @@
 #include "allocator_config.h"
 #include "chunk.h"
 #include "combined.h"
+#include "condition_variable.h"
 #include "mem_map.h"
+#include "size_class_map.h"
 
 #include <condition_variable>
 #include <memory>
@@ -164,13 +166,60 @@
 template <class TypeParam>
 using ScudoCombinedDeathTest = ScudoCombinedTest<TypeParam>;
 
+namespace scudo {
+struct TestConditionVariableConfig {
+  static const bool MaySupportMemoryTagging = true;
+  template <class A>
+  using TSDRegistryT =
+      scudo::TSDRegistrySharedT<A, 8U, 4U>; // Shared, max 8 TSDs.
+
+  struct Primary {
+    using SizeClassMap = scudo::AndroidSizeClassMap;
+#if SCUDO_CAN_USE_PRIMARY64
+    static const scudo::uptr RegionSizeLog = 28U;
+    typedef scudo::u32 CompactPtrT;
+    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const scudo::uptr GroupSizeLog = 20U;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+#else
+    static const scudo::uptr RegionSizeLog = 18U;
+    static const scudo::uptr GroupSizeLog = 18U;
+    typedef scudo::uptr CompactPtrT;
+#endif
+    static const scudo::s32 MinReleaseToOsIntervalMs = 1000;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = 1000;
+    static const bool UseConditionVariable = true;
+#if SCUDO_LINUX
+    using ConditionVariableT = scudo::ConditionVariableLinux;
+#else
+    using ConditionVariableT = scudo::ConditionVariableDummy;
+#endif
+  };
+#if SCUDO_CAN_USE_PRIMARY64
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator64<Config>;
+#else
+  template <typename Config>
+  using PrimaryT = scudo::SizeClassAllocator32<Config>;
+#endif
+
+  struct Secondary {
+    template <typename Config>
+    using CacheT = scudo::MapAllocatorNoCache<Config>;
+  };
+  template <typename Config> using SecondaryT = scudo::MapAllocator<Config>;
+};
+} // namespace scudo
+
 #if SCUDO_FUCHSIA
 #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                             \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, FuchsiaConfig)
 #else
 #define SCUDO_TYPED_TEST_ALL_TYPES(FIXTURE, NAME)                             \
   SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, DefaultConfig)                         \
-  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, AndroidConfig)                         \
+  SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TestConditionVariableConfig)
 #endif
 
 #define SCUDO_TYPED_TEST_TYPE(FIXTURE, NAME, TYPE)                            \
diff --git a/compiler-rt/lib/scudo/standalone/tests/condition_variable_test.cpp b/compiler-rt/lib/scudo/standalone/tests/condition_variable_test.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/tests/condition_variable_test.cpp
@@ -0,0 +1,59 @@
+//===-- condition_variable_test.cpp -----------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "tests/scudo_unit_test.h"
+
+#include "common.h"
+#include "condition_variable.h"
+#include "mutex.h"
+
+#include <thread>
+
+template <typename ConditionVariableT> void simpleWaitAndNotifyAll() {
+  constexpr scudo::u32 NumThreads = 2;
+  constexpr scudo::u32 CounterMax = 1024;
+  std::thread Threads[NumThreads];
+
+  scudo::HybridMutex M;
+  ConditionVariableT CV;
+  CV.bindTestOnly(M);
+  scudo::u32 Counter = 0;
+
+  for (scudo::u32 I = 0; I < NumThreads; ++I) {
+    Threads[I] = std::thread(
+        [&](scudo::u32 Id) {
+          bool Running = true;
+          do {
+            scudo::ScopedLock L(M);
+            if (Counter % NumThreads != Id && Counter < CounterMax)
+              CV.wait(M);
+            if (Counter >= CounterMax)
+              Running = false;
+            else
+              ++Counter;
+            CV.notifyAll(M);
+          } while (Running);
+        },
+        I);
+  }
+
+  for (std::thread &T : Threads)
+    T.join();
+
+  EXPECT_EQ(Counter, CounterMax);
+}
+
+TEST(ScudoConditionVariableTest, DummyCVWaitAndNotifyAll) {
+  simpleWaitAndNotifyAll<scudo::ConditionVariableDummy>();
+}
+
+#if SCUDO_LINUX
+TEST(ScudoConditionVariableTest, LinuxCVWaitAndNotifyAll) {
+  simpleWaitAndNotifyAll<scudo::ConditionVariableLinux>();
+}
+#endif
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -9,6 +9,7 @@
 #include "tests/scudo_unit_test.h"
 
 #include "allocator_config.h"
+#include "condition_variable.h"
 #include "primary32.h"
 #include "primary64.h"
 #include "size_class_map.h"
@@ -105,6 +106,34 @@
   };
 };
 
+// This is the only test config that enables the condition variable.
+template <typename SizeClassMapT> struct TestConfig5 {
+  static const bool MaySupportMemoryTagging = true;
+
+  struct Primary {
+    using SizeClassMap = SizeClassMapT;
+#if defined(__mips__)
+    // Unable to allocate greater size on QEMU-user.
+    static const scudo::uptr RegionSizeLog = 23U;
+#else
+    static const scudo::uptr RegionSizeLog = 24U;
+#endif
+    static const scudo::s32 MinReleaseToOsIntervalMs = INT32_MIN;
+    static const scudo::s32 MaxReleaseToOsIntervalMs = INT32_MAX;
+    static const scudo::uptr CompactPtrScale = SCUDO_MIN_ALIGNMENT_LOG;
+    static const scudo::uptr GroupSizeLog = 18U;
+    typedef scudo::u32 CompactPtrT;
+    static const bool EnableRandomOffset = true;
+    static const scudo::uptr MapSizeIncrement = 1UL << 18;
+    static const bool UseConditionVariable = true;
+#if SCUDO_LINUX
+    using ConditionVariableT = scudo::ConditionVariableLinux;
+#else
+    using ConditionVariableT = scudo::ConditionVariableDummy;
+#endif
+  };
+};
+
 template