Index: lib/scudo/CMakeLists.txt
===================================================================
--- lib/scudo/CMakeLists.txt
+++ lib/scudo/CMakeLists.txt
@@ -14,6 +14,7 @@
   scudo_interceptors.cpp
   scudo_new_delete.cpp
   scudo_termination.cpp
+  scudo_tls_linux.cpp
   scudo_utils.cpp)
 
 # Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.
Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -53,7 +53,7 @@
   u64 Offset          : 16; // Offset from the beginning of the backend
                             // allocation to the beginning of the chunk
                             // itself, in multiples of MinAlignment. See
-                            /// comment about its maximum value and in init().
+                            // comment about its maximum value and in init().
   u64 Salt            : 8;
 };
 
@@ -62,7 +62,7 @@
 
 // Minimum alignment of 8 bytes for 32-bit, 16 for 64-bit
 const uptr MinAlignmentLog = FIRST_32_SECOND_64(3, 4);
-const uptr MaxAlignmentLog = 24; // 16 MB
+const uptr MaxAlignmentLog = 24;  // 16 MB
 const uptr MinAlignment = 1 << MinAlignmentLog;
 const uptr MaxAlignment = 1 << MaxAlignmentLog;
 
@@ -70,21 +70,44 @@
 const uptr AlignedChunkHeaderSize =
     (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
 
-struct AllocatorOptions {
-  u32 QuarantineSizeMb;
-  u32 ThreadLocalQuarantineSizeKb;
-  bool MayReturnNull;
-  s32 ReleaseToOSIntervalMs;
-  bool DeallocationTypeMismatch;
-  bool DeleteSizeMismatch;
-  bool ZeroContents;
-
-  void setFrom(const Flags *f, const CommonFlags *cf);
-  void copyTo(Flags *f, CommonFlags *cf) const;
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+const uptr AllocatorSize = 0x40000000000ULL;  // 4TB.
+typedef DefaultSizeClassMap SizeClassMap;
+struct AP {
+  static const uptr kSpaceBeg = AllocatorSpace;
+  static const uptr kSpaceSize = AllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __scudo::SizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags =
+      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
 };
+typedef SizeClassAllocator64<AP> PrimaryAllocator;
+#else
+// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
+// security improvements brought to the 64-bit one. This makes the 32-bit
+// version of Scudo slightly less toughened.
+static const uptr RegionSizeLog = 20;
+static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<NumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
+# endif  // SANITIZER_WORDSIZE
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
+    RegionSizeLog, ByteMap> PrimaryAllocator;
+#endif  // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
 
-void initAllocator(const AllocatorOptions &options);
-void drainQuarantine();
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef ScudoLargeMmapAllocator SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+    ScudoBackendAllocator;
+
+void initScudo();
 
 void *scudoMalloc(uptr Size, AllocType Type);
 void scudoFree(void *Ptr, AllocType Type);
@@ -98,8 +121,6 @@
 void *scudoAlignedAlloc(uptr Alignment, uptr Size);
 uptr scudoMallocUsableSize(void *Ptr);
 
-#include "scudo_allocator_secondary.h"
-
 }  // namespace __scudo
 
 #endif  // SCUDO_ALLOCATOR_H_
Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -15,6 +15,7 @@
 //===----------------------------------------------------------------------===//
 
 #include "scudo_allocator.h"
+#include "scudo_tls.h"
 #include "scudo_utils.h"
 
 #include "sanitizer_common/sanitizer_allocator_interface.h"
@@ -26,44 +27,6 @@
 
 namespace __scudo {
 
-#if SANITIZER_CAN_USE_ALLOCATOR64
-const uptr AllocatorSpace = ~0ULL;
-const uptr AllocatorSize = 0x40000000000ULL;
-typedef DefaultSizeClassMap SizeClassMap;
-struct AP {
-  static const uptr kSpaceBeg = AllocatorSpace;
-  static const uptr kSpaceSize = AllocatorSize;
-  static const uptr kMetadataSize = 0;
-  typedef __scudo::SizeClassMap SizeClassMap;
-  typedef NoOpMapUnmapCallback MapUnmapCallback;
-  static const uptr kFlags =
-      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
-};
-typedef SizeClassAllocator64<AP> PrimaryAllocator;
-#else
-// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
-// security improvements brought to the 64-bit one. This makes the 32-bit
-// version of Scudo slightly less toughened.
-static const uptr RegionSizeLog = 20;
-static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
-# if SANITIZER_WORDSIZE == 32
-typedef FlatByteMap<NumRegions> ByteMap;
-# elif SANITIZER_WORDSIZE == 64
-typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
-# endif  // SANITIZER_WORDSIZE
-typedef DefaultSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
-    RegionSizeLog, ByteMap> PrimaryAllocator;
-#endif  // SANITIZER_CAN_USE_ALLOCATOR64
-
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef ScudoLargeMmapAllocator SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
-    ScudoBackendAllocator;
-
-static ScudoBackendAllocator &getBackendAllocator();
-
-static thread_local Xorshift128Plus Prng;
 // Global static cookie, initialized at start-up.
 static uptr Cookie;
 
@@ -88,6 +51,8 @@
 #endif  // defined(__SSE4_2__)
 }
 
+static ScudoBackendAllocator &getBackendAllocator();
+
 struct ScudoChunk : UnpackedHeader {
   // We can't use the offset member of the chunk itself, as we would double
   // fetch it without any warranty that it wouldn't have been tampered. To
@@ -188,32 +153,44 @@
   return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
 }
 
-static bool ScudoInitIsRunning = false;
+struct AllocatorOptions {
+  u32 QuarantineSizeMb;
+  u32 ThreadLocalQuarantineSizeKb;
+  bool MayReturnNull;
+  s32 ReleaseToOSIntervalMs;
+  bool DeallocationTypeMismatch;
+  bool DeleteSizeMismatch;
+  bool ZeroContents;
 
-static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
-static pthread_key_t PThreadKey;
+  void setFrom(const Flags *f, const CommonFlags *cf);
+  void copyTo(Flags *f, CommonFlags *cf) const;
+};
+
+void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
+  MayReturnNull = cf->allocator_may_return_null;
+  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
+  QuarantineSizeMb = f->QuarantineSizeMb;
+  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
+  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
+  DeleteSizeMismatch = f->DeleteSizeMismatch;
+  ZeroContents = f->ZeroContents;
+}
 
-static thread_local bool ThreadInited = false;
-static thread_local bool ThreadTornDown = false;
-static thread_local AllocatorCache Cache;
-
-static void teardownThread(void *p) {
-  uptr v = reinterpret_cast<uptr>(p);
-  // The glibc POSIX thread-local-storage deallocation routine calls user
-  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
-  // We want to be called last since other destructors might call free and the
-  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
-  // quarantine and swallowing the cache.
-  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
-    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(v + 1));
-    return;
-  }
-  drainQuarantine();
-  getBackendAllocator().DestroyCache(&Cache);
-  ThreadTornDown = true;
+void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
+  cf->allocator_may_return_null = MayReturnNull;
+  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
+  f->QuarantineSizeMb = QuarantineSizeMb;
+  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
+  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
+  f->DeleteSizeMismatch = DeleteSizeMismatch;
+  f->ZeroContents = ZeroContents;
 }
 
-static void initInternal() {
+static void initScudoInternal(const AllocatorOptions &Options);
+
+static bool ScudoInitIsRunning = false;
+
+void initScudo() {
   SanitizerToolName = "Scudo";
   CHECK(!ScudoInitIsRunning && "Scudo init calls itself!");
   ScudoInitIsRunning = true;
@@ -227,25 +204,13 @@
   AllocatorOptions Options;
   Options.setFrom(getFlags(), common_flags());
-  initAllocator(Options);
+  initScudoInternal(Options);
 
-  MaybeStartBackgroudThread();
+  // TODO(kostyak): determine if MaybeStartBackgroudThread could be of some use.
 
   ScudoInitIsRunning = false;
 }
 
-static void initGlobal() {
-  pthread_key_create(&PThreadKey, teardownThread);
-  initInternal();
-}
-
-static void NOINLINE initThread() {
-  pthread_once(&GlobalInited, initGlobal);
-  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
-  getBackendAllocator().InitCache(&Cache);
-  ThreadInited = true;
-}
-
 struct QuarantineCallback {
   explicit QuarantineCallback(AllocatorCache *Cache)
       : Cache_(Cache) {}
@@ -278,26 +243,20 @@
 typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
 typedef ScudoQuarantine::Cache ScudoQuarantineCache;
 
-static thread_local ScudoQuarantineCache ThreadQuarantineCache;
+COMPILER_CHECK(sizeof(ScudoQuarantineCache) <=
+               sizeof(ScudoThreadContext::QuarantineCachePlaceHolder));
 
-void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
-  MayReturnNull = cf->allocator_may_return_null;
-  ReleaseToOSIntervalMs = cf->allocator_release_to_os_interval_ms;
-  QuarantineSizeMb = f->QuarantineSizeMb;
-  ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb;
-  DeallocationTypeMismatch = f->DeallocationTypeMismatch;
-  DeleteSizeMismatch = f->DeleteSizeMismatch;
-  ZeroContents = f->ZeroContents;
+AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) {
+  return &ThreadContext->Cache;
 }
 
-void AllocatorOptions::copyTo(Flags *f, CommonFlags *cf) const {
-  cf->allocator_may_return_null = MayReturnNull;
-  cf->allocator_release_to_os_interval_ms = ReleaseToOSIntervalMs;
-  f->QuarantineSizeMb = QuarantineSizeMb;
-  f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb;
-  f->DeallocationTypeMismatch = DeallocationTypeMismatch;
-  f->DeleteSizeMismatch = DeleteSizeMismatch;
-  f->ZeroContents = ZeroContents;
+ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) {
+  return reinterpret_cast<
+      ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder);
+}
+
+Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) {
+  return &ThreadContext->Prng;
 }
 
 struct ScudoAllocator {
@@ -313,6 +272,7 @@
   StaticSpinMutex FallbackMutex;
   AllocatorCache FallbackAllocatorCache;
   ScudoQuarantineCache FallbackQuarantineCache;
+  Xorshift128Plus FallbackPrng;
 
   bool DeallocationTypeMismatch;
   bool ZeroContents;
@@ -361,13 +321,13 @@
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
     BackendAllocator.InitCache(&FallbackAllocatorCache);
-    Cookie = Prng.getNext();
+    FallbackPrng.initFromURandom();
+    Cookie = FallbackPrng.getNext();
   }
 
   // Helper function that checks for a valid Scudo chunk. nullptr isn't.
   bool isValidPointer(const void *UserPtr) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (!UserPtr)
       return false;
     uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
@@ -379,8 +339,7 @@
 
   // Allocates a chunk.
   void *allocate(uptr Size, uptr Alignment, AllocType Type,
                  bool ForceZeroContents = false) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
       dieWithMessage("ERROR: alignment is not a power of 2\n");
     }
@@ -407,11 +366,16 @@
     bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
 
     void *Ptr;
+    uptr Salt;
     uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
-    if (LIKELY(!ThreadTornDown)) {
-      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, AllocationAlignment);
+    ScudoThreadContext *ThreadContext = getThreadContext();
+    if (LIKELY(ThreadContext)) {
+      Salt = getPrng(ThreadContext)->getNext();
+      Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext),
+                                      NeededSize, AllocationAlignment);
     } else {
       SpinMutexLock l(&FallbackMutex);
+      Salt = FallbackPrng.getNext();
       Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
                                       AllocationAlignment);
     }
@@ -453,7 +417,7 @@
       if (TrailingBytes)
         Header.SizeOrUnusedBytes = PageSize - TrailingBytes;
     }
-    Header.Salt = static_cast<u8>(Prng.getNext());
+    Header.Salt = static_cast<u8>(Salt);
     getScudoChunk(UserBeg)->storeHeader(&Header);
     void *UserPtr = reinterpret_cast<void *>(UserBeg);
     // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
@@ -462,16 +426,17 @@
 
   // Place a chunk in the quarantine. In the event of a zero-sized quarantine,
   // we directly deallocate the chunk, otherwise the flow would lead to the
-  // chunk being checksummed twice, once before Put and once in Recycle, with
-  // no additional security value.
+  // chunk being loaded (and checked) twice, and stored (and checksummed) once,
+  // with no additional security value.
   void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header,
                                    uptr Size) {
     bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0);
     if (BypassQuarantine) {
       Chunk->eraseHeader();
       void *Ptr = Chunk->getAllocBeg(Header);
-      if (LIKELY(!ThreadTornDown)) {
-        getBackendAllocator().Deallocate(&Cache, Ptr);
+      ScudoThreadContext *ThreadContext = getThreadContext();
+      if (LIKELY(ThreadContext)) {
+        getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr);
       } else {
         SpinMutexLock Lock(&FallbackMutex);
         getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr);
@@ -480,9 +445,12 @@
       UnpackedHeader NewHeader = *Header;
       NewHeader.State = ChunkQuarantine;
       Chunk->compareExchangeHeader(&NewHeader, Header);
-      if (LIKELY(!ThreadTornDown)) {
-        AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                                QuarantineCallback(&Cache), Chunk, Size);
+      ScudoThreadContext *ThreadContext = getThreadContext();
+      if (LIKELY(ThreadContext)) {
+        AllocatorQuarantine.Put(getQuarantineCache(ThreadContext),
+                                QuarantineCallback(
+                                    getAllocatorCache(ThreadContext)),
+                                Chunk, Size);
       } else {
         SpinMutexLock l(&FallbackMutex);
         AllocatorQuarantine.Put(&FallbackQuarantineCache,
@@ -495,8 +463,7 @@
 
   // Deallocates a Chunk, which means adding it to the delayed free list (or
   // Quarantine).
   void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr);
     if (!UserPtr)
       return;
@@ -542,8 +509,7 @@
 
   // Reallocates a chunk. We can save on a new allocation if the new requested
   // size still fits in the chunk.
   void *reallocate(void *OldPtr, uptr NewSize) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
     if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
       dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
@@ -585,8 +551,7 @@
 
   // Helper function that returns the actual usable size of a chunk.
   uptr getUsableSize(const void *Ptr) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     if (!Ptr)
       return 0;
     uptr UserBeg = reinterpret_cast<uptr>(Ptr);
@@ -602,22 +567,22 @@
   }
 
   void *calloc(uptr NMemB, uptr Size) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
    uptr Total = NMemB * Size;
     if (Size != 0 && Total / Size != NMemB)  // Overflow check
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
     return allocate(Total, MinAlignment, FromMalloc, true);
   }
 
-  void drainQuarantine() {
-    AllocatorQuarantine.Drain(&ThreadQuarantineCache,
-                              QuarantineCallback(&Cache));
+  void commitBack(ScudoThreadContext *ThreadContext) {
+    AllocatorCache *Cache = getAllocatorCache(ThreadContext);
+    AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext),
+                              QuarantineCallback(Cache));
+    BackendAllocator.DestroyCache(Cache);
   }
 
   uptr getStats(AllocatorStat StatType) {
-    if (UNLIKELY(!ThreadInited))
-      initThread();
+    initThreadMaybe();
     uptr stats[AllocatorStatCount];
     BackendAllocator.GetStats(stats);
     return stats[StatType];
@@ -630,12 +595,18 @@
   return Instance.BackendAllocator;
 }
 
-void initAllocator(const AllocatorOptions &Options) {
+static void initScudoInternal(const AllocatorOptions &Options) {
   Instance.init(Options);
 }
 
-void drainQuarantine() {
-  Instance.drainQuarantine();
+void ScudoThreadContext::init() {
+  getBackendAllocator().InitCache(&Cache);
+  Prng.initFromURandom();
+  memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder));
+}
+
+void ScudoThreadContext::commitBack() {
+  Instance.commitBack(this);
 }
 
 void *scudoMalloc(uptr Size, AllocType Type) {
Index: lib/scudo/scudo_tls.h
===================================================================
--- /dev/null
+++ lib/scudo/scudo_tls.h
@@ -0,0 +1,40 @@
+//===-- scudo_tls.h ---------------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure definition.
+/// Implementation will differ based on the thread local storage primitives
+/// offered by the underlying platform.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_H_
+#define SCUDO_TLS_H_
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+namespace __scudo {
+
+struct ALIGNED(64) ScudoThreadContext {
+ public:
+  AllocatorCache Cache;
+  Xorshift128Plus Prng;
+  uptr QuarantineCachePlaceHolder[4];
+  void init();
+  void commitBack();
+};
+
+void initThread();
+
+// Fastpath functions are defined in the following platform specific headers.
+#include "scudo_tls_linux.h"
+
+}  // namespace __scudo
+
+#endif  // SCUDO_TLS_H_
Index: lib/scudo/scudo_tls_linux.h
===================================================================
--- /dev/null
+++ lib/scudo/scudo_tls_linux.h
@@ -0,0 +1,48 @@
+//===-- scudo_tls_linux.h ---------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure fastpath functions implementation for platforms
+/// supporting thread_local.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_TLS_LINUX_H_
+#define SCUDO_TLS_LINUX_H_
+
+#ifndef SCUDO_TLS_H_
+# error "This file must be included inside scudo_tls.h."
+#endif  // SCUDO_TLS_H_
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+enum ThreadState : u8 {
+  ThreadNotInitialized = 0,
+  ThreadInitialized,
+  ThreadTornDown,
+};
+extern thread_local ThreadState ScudoThreadState;
+extern thread_local ScudoThreadContext ThreadLocalContext;
+
+ALWAYS_INLINE void initThreadMaybe() {
+  if (LIKELY(ScudoThreadState != ThreadNotInitialized))
+    return;
+  initThread();
+}
+
+ALWAYS_INLINE ScudoThreadContext *getThreadContext() {
+  if (UNLIKELY(ScudoThreadState == ThreadTornDown))
+    return nullptr;
+  return &ThreadLocalContext;
+}
+
+#endif  // SANITIZER_LINUX
+
+#endif  // SCUDO_TLS_LINUX_H_
Index: lib/scudo/scudo_tls_linux.cpp
===================================================================
--- /dev/null
+++ lib/scudo/scudo_tls_linux.cpp
@@ -0,0 +1,62 @@
+//===-- scudo_tls_linux.cpp -------------------------------------*- C++ -*-===//
+//
+//                     The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local structure implementation for platforms supporting
+/// thread_local.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX
+
+#include "scudo_tls.h"
+
+#include <limits.h>
+#include <pthread.h>
+
+namespace __scudo {
+
+static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+thread_local ThreadState ScudoThreadState = ThreadNotInitialized;
+thread_local ScudoThreadContext ThreadLocalContext;
+
+static void teardownThread(void *Ptr) {
+  uptr Iteration = reinterpret_cast<uptr>(Ptr);
+  // The glibc POSIX thread-local-storage deallocation routine calls user
+  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
+  // We want to be called last since other destructors might call free and the
+  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
+  // quarantine and swallowing the cache.
+  if (Iteration < PTHREAD_DESTRUCTOR_ITERATIONS) {
+    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Iteration + 1));
+    return;
+  }
+  ThreadLocalContext.commitBack();
+  ScudoThreadState = ThreadTornDown;
+}
+
+static void initOnce() {
+  CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0);
+  initScudo();
+}
+
+void initThread() {
+  pthread_once(&GlobalInitialized, initOnce);
+  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
+  ThreadLocalContext.init();
+  ScudoThreadState = ThreadInitialized;
+}
+
+}  // namespace __scudo
+
+#endif  // SANITIZER_LINUX
Index: lib/scudo/scudo_utils.h
===================================================================
--- lib/scudo/scudo_utils.h
+++ lib/scudo/scudo_utils.h
@@ -40,7 +40,7 @@
 // The state (128 bits) will be stored in thread local storage.
 struct Xorshift128Plus {
  public:
-  Xorshift128Plus();
+  void initFromURandom();
   u64 getNext() {
     u64 x = State[0];
     const u64 y = State[1];
Index: lib/scudo/scudo_utils.cpp
===================================================================
--- lib/scudo/scudo_utils.cpp
+++ lib/scudo/scudo_utils.cpp
@@ -153,9 +153,9 @@
   }
 }
 
-// Default constructor for Xorshift128Plus seeds the state with /dev/urandom.
+// Seeds the xorshift state with /dev/urandom.
 // TODO(kostyak): investigate using getrandom() if available.
-Xorshift128Plus::Xorshift128Plus() {
+void Xorshift128Plus::initFromURandom() {
   fillRandom(reinterpret_cast<u8 *>(State), sizeof(State));
 }
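
A note for reviewers on the TLS scheme above: the pthread key machinery is kept only for its destructor, while all fast-path accesses go through thread_local variables. The standalone sketch below (hypothetical names, not part of the patch) shows the whole pattern in isolation; it compiles as plain C++11 with -lpthread:

// tls_sketch.cpp -- illustrative only; names are hypothetical stand-ins.
#include <pthread.h>
#include <limits.h>

#include <cstdint>
#include <cstdio>

enum class ThreadState : uint8_t { NotInitialized, Initialized, TornDown };

struct ThreadContext {
  uint64_t AllocationCount = 0;  // Stand-in for the cache, PRNG, etc.
  void init() { AllocationCount = 0; }
  void commitBack() {  // Stand-in for draining the quarantine & cache.
    std::printf("thread made %llu allocations\n",
                static_cast<unsigned long long>(AllocationCount));
  }
};

static pthread_once_t GlobalInitialized = PTHREAD_ONCE_INIT;
static pthread_key_t PThreadKey;

static thread_local ThreadState State = ThreadState::NotInitialized;
static thread_local ThreadContext Context;

static void teardownThread(void *Ptr) {
  uintptr_t Iteration = reinterpret_cast<uintptr_t>(Ptr);
  // Re-arm the key until the last destructor iteration so that other TLS
  // destructors that still malloc/free run before we commit back.
  if (Iteration < PTHREAD_DESTRUCTOR_ITERATIONS) {
    pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Iteration + 1));
    return;
  }
  Context.commitBack();
  State = ThreadState::TornDown;
}

static void initOnce() { pthread_key_create(&PThreadKey, teardownThread); }

static void initThread() {
  pthread_once(&GlobalInitialized, initOnce);
  // The key value must be non-null or the destructor is never invoked.
  pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
  Context.init();
  State = ThreadState::Initialized;
}

// Fast path: a thread_local load and one predictable branch, no locks.
static ThreadContext *getThreadContext() {
  if (State == ThreadState::NotInitialized)
    initThread();
  if (State == ThreadState::TornDown)
    return nullptr;  // Caller falls back to the mutex-protected globals.
  return &Context;
}

int main() {
  if (ThreadContext *C = getThreadContext())
    C->AllocationCount++;
  pthread_exit(nullptr);  // Runs key destructors for the main thread too.
}

Note how the two thread_local booleans of the old code (ThreadInited, ThreadTornDown) collapse into a single three-state enum, so initThreadMaybe() and getThreadContext() each cost one TLS load and one branch.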
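The QuarantineCachePlaceHolder member also deserves a comment: scudo_tls.h cannot name ScudoQuarantineCache, whose type only exists inside scudo_allocator.cpp, so the context reserves raw storage and the .cpp asserts at compile time that the real type fits; that is what the COMPILER_CHECK (sanitizer_common's static_assert wrapper) in the diff enforces. A minimal sketch of the pattern, with stand-in types:

#include <cstdint>

struct Cache { void *Batch[2]; uintptr_t Size; };  // Stand-in; normally only visible in the .cpp.

struct ThreadContext {
  // Raw storage; the concrete cache type is not visible from this header.
  uintptr_t QuarantineCachePlaceHolder[4];
};

// In the translation unit that knows the real type:
static_assert(sizeof(Cache) <= sizeof(ThreadContext::QuarantineCachePlaceHolder),
              "place holder too small");

static Cache *getQuarantineCache(ThreadContext *C) {
  return reinterpret_cast<Cache *>(C->QuarantineCachePlaceHolder);
}

This relies on the cache being usable from zero-initialized memory, which is why ScudoThreadContext::init() memset()s the placeholder before first use.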
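Finally, on the Xorshift128Plus change: turning the seeding constructor into an explicit initFromURandom() avoids running a constructor on first access of the thread_local context, and allocate() now draws the header salt from the per-thread generator (or FallbackPrng under the fallback mutex) instead of a shared thread_local Prng. For reference, here is a self-contained version of the generator; the hunk above cuts off after the first two lines of getNext(), so the remainder is reconstructed from the reference xorshift128+ algorithm rather than from this diff:

#include <cstdint>

struct Xorshift128Plus {
  uint64_t State[2];
  // The patch seeds from /dev/urandom; any nonzero 128-bit seed works, as
  // the all-zero state is a fixed point of the recurrence.
  void initFromSeed(uint64_t S0, uint64_t S1) {
    State[0] = S0 | 1;  // Force a nonzero state.
    State[1] = S1;
  }
  uint64_t getNext() {
    uint64_t x = State[0];
    const uint64_t y = State[1];
    State[0] = y;
    x ^= x << 23;
    State[1] = x ^ y ^ (x >> 17) ^ (y >> 26);
    return State[1] + y;
  }
};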