diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -944,8 +944,8 @@
   static const sptr MemTagAllocationTraceIndex = -2;
   static const sptr MemTagAllocationTidIndex = -1;
 
-  u32 Cookie;
-  u32 QuarantineMaxChunkSize;
+  u32 Cookie = 0;
+  u32 QuarantineMaxChunkSize = 0;
 
   GlobalStats Stats;
   PrimaryT Primary;
@@ -977,7 +977,7 @@
 #endif
     Entry Entries[NumEntries];
   };
-  AllocationRingBuffer RingBuffer;
+  AllocationRingBuffer RingBuffer = {};
 
   // The following might get optimized out by the compiler.
   NOINLINE void performSanityChecks() {
diff --git a/compiler-rt/lib/scudo/standalone/internal_defs.h b/compiler-rt/lib/scudo/standalone/internal_defs.h
--- a/compiler-rt/lib/scudo/standalone/internal_defs.h
+++ b/compiler-rt/lib/scudo/standalone/internal_defs.h
@@ -48,6 +48,34 @@
 #define USED __attribute__((used))
 #define NOEXCEPT noexcept
 
+// This check is only available on Clang. This is essentially an alias of
+// C++20's 'constinit' specifier which will take care of this when (if?) we can
+// ask all libcs that use Scudo to compile us with C++20. Dynamic
+// initialization is bad; Scudo is designed to be lazily initialized on the
+// first call to malloc/free (and friends), and this generally happens in the
+// loader somewhere in libdl's init. After the loader is done, control is
+// transferred to libc's initialization, and the dynamic initializers are run.
+// If there's a dynamic initializer for Scudo, then it will clobber the
+// already-initialized Scudo, and re-initialize all its members back to default
+// values, causing various explosions. Unfortunately, marking
+// scudo::Allocator<>'s constructor as 'constexpr' isn't sufficient to prevent
+// dynamic initialization, as default initialization is fine under 'constexpr'
+// (but not 'constinit'). Clang at -O0, and gcc at all opt levels, will emit a
+// dynamic initializer for any constant-initialized variables if there is a mix
+// of default-initialized and constant-initialized variables.
+//
+// If you're looking at this because your build failed, you probably introduced
+// a new member to scudo::Allocator<> (possibly transiently) that didn't have an
+// initializer. The fix is easy - just add one.
+#if defined(__has_attribute)
+#if __has_attribute(require_constant_initialization)
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION                                  \
+  __attribute__((__require_constant_initialization__))
+#else
+#define SCUDO_REQUIRE_CONSTANT_INITIALIZATION
+#endif
+#endif
+
 namespace scudo {
 
 typedef unsigned long uptr;
diff --git a/compiler-rt/lib/scudo/standalone/list.h b/compiler-rt/lib/scudo/standalone/list.h
--- a/compiler-rt/lib/scudo/standalone/list.h
+++ b/compiler-rt/lib/scudo/standalone/list.h
@@ -57,9 +57,9 @@
   void checkConsistency() const;
 
 protected:
-  uptr Size;
-  T *First;
-  T *Last;
+  uptr Size = 0;
+  T *First = nullptr;
+  T *Last = nullptr;
 };
 
 template <class T> void IntrusiveList<T>::checkConsistency() const {
diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -138,9 +138,9 @@
     uptr ClassSize;
     CompactPtrT Chunks[2 * TransferBatch::MaxNumCached];
   };
-  PerClass PerClassArray[NumClasses];
+  PerClass PerClassArray[NumClasses] = {};
   LocalStats Stats;
-  SizeClassAllocator *Allocator;
+  SizeClassAllocator *Allocator = nullptr;
 
   ALWAYS_INLINE void initCacheMaybe(PerClass *C) {
     if (LIKELY(C->MaxCount))
diff --git a/compiler-rt/lib/scudo/standalone/mutex.h b/compiler-rt/lib/scudo/standalone/mutex.h
--- a/compiler-rt/lib/scudo/standalone/mutex.h
+++ b/compiler-rt/lib/scudo/standalone/mutex.h
@@ -48,9 +48,9 @@
   static constexpr u8 NumberOfYields = 8U;
 
 #if SCUDO_LINUX
-  atomic_u32 M;
+  atomic_u32 M = {};
 #elif SCUDO_FUCHSIA
-  sync_mutex_t M;
+  sync_mutex_t M = {};
 #endif
 
   void lockSlow();
diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
--- a/compiler-rt/lib/scudo/standalone/options.h
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -44,9 +44,8 @@
 }
 
 struct AtomicOptions {
-  atomic_u32 Val;
+  atomic_u32 Val = {};
 
-public:
   Options load() const { return Options{atomic_load_relaxed(&Val)}; }
 
   void clear(OptionBit Opt) {
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -489,17 +489,17 @@
     return TotalReleasedBytes;
   }
 
-  SizeClassInfo SizeClassInfoArray[NumClasses];
+  SizeClassInfo SizeClassInfoArray[NumClasses] = {};
   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
-  ByteMap PossibleRegions;
-  atomic_s32 ReleaseToOsIntervalMs;
+  ByteMap PossibleRegions = {};
+  atomic_s32 ReleaseToOsIntervalMs = {};
 
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
   HybridMutex RegionsStashMutex;
-  uptr NumberOfStashedRegions;
-  uptr RegionsStash[MaxStashedRegions];
+  uptr NumberOfStashedRegions = 0;
+  uptr RegionsStash[MaxStashedRegions] = {};
 };
 
 } // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -285,24 +285,24 @@
   struct UnpaddedRegionInfo {
     HybridMutex Mutex;
     SinglyLinkedList<TransferBatch> FreeList;
-    uptr RegionBeg;
-    RegionStats Stats;
-    u32 RandState;
-    uptr MappedUser;    // Bytes mapped for user memory.
-    uptr AllocatedUser; // Bytes allocated for user memory.
-    MapPlatformData Data;
-    ReleaseToOsInfo ReleaseInfo;
-    bool Exhausted;
+    uptr RegionBeg = 0;
+    RegionStats Stats = {};
+    u32 RandState = 0;
+    uptr MappedUser = 0;    // Bytes mapped for user memory.
+    uptr AllocatedUser = 0; // Bytes allocated for user memory.
+    MapPlatformData Data = {};
+    ReleaseToOsInfo ReleaseInfo = {};
+    bool Exhausted = false;
   };
   struct RegionInfo : UnpaddedRegionInfo {
     char Padding[SCUDO_CACHE_LINE_SIZE -
-                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)];
+                 (sizeof(UnpaddedRegionInfo) % SCUDO_CACHE_LINE_SIZE)] = {};
   };
   static_assert(sizeof(RegionInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
 
-  uptr PrimaryBase;
-  MapPlatformData Data;
-  atomic_s32 ReleaseToOsIntervalMs;
+  uptr PrimaryBase = 0;
+  MapPlatformData Data = {};
+  atomic_s32 ReleaseToOsIntervalMs = {};
   alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
 
   RegionInfo *getRegionInfo(uptr ClassId) {
diff --git a/compiler-rt/lib/scudo/standalone/quarantine.h b/compiler-rt/lib/scudo/standalone/quarantine.h
--- a/compiler-rt/lib/scudo/standalone/quarantine.h
+++ b/compiler-rt/lib/scudo/standalone/quarantine.h
@@ -161,7 +161,7 @@
 private:
   SinglyLinkedList<QuarantineBatch> List;
-  atomic_uptr Size;
+  atomic_uptr Size = {};
 
   void addToSize(uptr add) { atomic_store_relaxed(&Size, getSize() + add); }
   void subFromSize(uptr sub) { atomic_store_relaxed(&Size, getSize() - sub); }
 
@@ -246,9 +246,9 @@
   alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
   alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecycleMutex;
-  atomic_uptr MinSize;
-  atomic_uptr MaxSize;
-  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
+  atomic_uptr MinSize = {};
+  atomic_uptr MaxSize = {};
+  alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize = {};
 
   void NOINLINE recycle(uptr MinSize, Callback Cb) {
     CacheT Tmp;
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -377,16 +377,16 @@
   }
 
   HybridMutex Mutex;
-  u32 EntriesCount;
-  u32 QuarantinePos;
-  atomic_u32 MaxEntriesCount;
-  atomic_uptr MaxEntrySize;
-  u64 OldestTime;
-  u32 IsFullEvents;
-  atomic_s32 ReleaseToOsIntervalMs;
-
-  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize];
-  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize];
+  u32 EntriesCount = 0;
+  u32 QuarantinePos = 0;
+  atomic_u32 MaxEntriesCount = {};
+  atomic_uptr MaxEntrySize = {};
+  u64 OldestTime = 0;
+  u32 IsFullEvents = 0;
+  atomic_s32 ReleaseToOsIntervalMs = {};
+
+  CachedBlock Entries[Config::SecondaryCacheEntriesArraySize] = {};
+  CachedBlock Quarantine[Config::SecondaryCacheQuarantineSize] = {};
 };
 
 template <typename Config> class MapAllocator {
@@ -451,11 +451,11 @@
 
   HybridMutex Mutex;
   DoublyLinkedList<LargeBlock::Header> InUseBlocks;
-  uptr AllocatedBytes;
-  uptr FreedBytes;
-  uptr LargestSize;
-  u32 NumberOfAllocs;
-  u32 NumberOfFrees;
+  uptr AllocatedBytes = 0;
+  uptr FreedBytes = 0;
+  uptr LargestSize = 0;
+  u32 NumberOfAllocs = 0;
+  u32 NumberOfFrees = 0;
   LocalStats Stats;
 };
 
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
--- a/compiler-rt/lib/scudo/standalone/stack_depot.h
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -40,7 +40,7 @@
 
 class StackDepot {
   HybridMutex RingEndMu;
-  u32 RingEnd;
+  u32 RingEnd = 0;
 
   // This data structure stores a stack trace for each allocation and
   // deallocation when stack trace recording is enabled, that may be looked up
@@ -70,7 +70,7 @@
 #endif
   static const uptr TabSize = 1 << TabBits;
   static const uptr TabMask = TabSize - 1;
-  atomic_u32 Tab[TabSize];
+  atomic_u32 Tab[TabSize] = {};
 
 #ifdef SCUDO_FUZZ
   static const uptr RingBits = 4;
@@ -79,7 +79,7 @@
 #endif
   static const uptr RingSize = 1 << RingBits;
   static const uptr RingMask = RingSize - 1;
-  atomic_u64 Ring[RingSize];
+  atomic_u64 Ring[RingSize] = {};
 
 public:
   // Insert hash of the stack trace [Begin, End) into the stack depot, and
diff --git a/compiler-rt/lib/scudo/standalone/stats.h b/compiler-rt/lib/scudo/standalone/stats.h
--- a/compiler-rt/lib/scudo/standalone/stats.h
+++ b/compiler-rt/lib/scudo/standalone/stats.h
@@ -46,11 +46,11 @@
 
   uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); }
 
-  LocalStats *Next;
-  LocalStats *Prev;
+  LocalStats *Next = nullptr;
+  LocalStats *Prev = nullptr;
 
 private:
-  atomic_uptr StatsArray[StatCount];
+  atomic_uptr StatsArray[StatCount] = {};
 };
 
 // Global stats, used for aggregation and querying.
diff --git a/compiler-rt/lib/scudo/standalone/tsd.h b/compiler-rt/lib/scudo/standalone/tsd.h
--- a/compiler-rt/lib/scudo/standalone/tsd.h
+++ b/compiler-rt/lib/scudo/standalone/tsd.h
@@ -26,7 +26,7 @@
 template <class Allocator> struct alignas(SCUDO_CACHE_LINE_SIZE) TSD {
   typename Allocator::CacheT Cache;
   typename Allocator::QuarantineCacheT QuarantineCache;
-  u8 DestructorIterations;
+  u8 DestructorIterations = 0;
 
   void initLinkerInitialized(Allocator *Instance) {
     Instance->initCache(&Cache);
@@ -59,7 +59,7 @@
 
 private:
   HybridMutex Mutex;
-  atomic_uptr Precedence;
+  atomic_uptr Precedence = {};
 };
 
 } // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
--- a/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
+++ b/compiler-rt/lib/scudo/standalone/tsd_exclusive.h
@@ -108,9 +108,9 @@
     Instance->callPostInitCallback();
   }
 
-  pthread_key_t PThreadKey;
-  bool Initialized;
-  atomic_u8 Disabled;
+  pthread_key_t PThreadKey = {};
+  bool Initialized = false;
+  atomic_u8 Disabled = {};
   TSD<Allocator> FallbackTSD;
   HybridMutex Mutex;
   static thread_local ThreadState State;
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.cpp
@@ -26,6 +26,7 @@
 // Export the static allocator so that the C++ wrappers can access it.
 // Technically we could have a completely separated heap for C & C++ but in
 // reality the amount of cross pollination between the two is staggering.
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 scudo::Allocator<scudo::Config, SCUDO_PREFIX(malloc_postinit)> SCUDO_ALLOCATOR;
 
 #include "wrappers_c.inc"
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -23,6 +23,7 @@
 #define SCUDO_ALLOCATOR Allocator
 
 extern "C" void SCUDO_PREFIX(malloc_postinit)();
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 static scudo::Allocator<scudo::AndroidConfig, SCUDO_PREFIX(malloc_postinit)>
     SCUDO_ALLOCATOR;
 
@@ -36,6 +37,7 @@
 #define SCUDO_ALLOCATOR SvelteAllocator
 
 extern "C" void SCUDO_PREFIX(malloc_postinit)();
+SCUDO_REQUIRE_CONSTANT_INITIALIZATION
 static scudo::Allocator<scudo::AndroidSvelteConfig,
                         SCUDO_PREFIX(malloc_postinit)>
     SCUDO_ALLOCATOR;
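
The standalone sketch below (not part of the patch) illustrates what the new SCUDO_REQUIRE_CONSTANT_INITIALIZATION attribute enforces: a global whose members all carry default member initializers can be constant-initialized, while dropping one initializer can force a dynamic initializer, which Clang then rejects. The REQUIRE_CONSTANT_INITIALIZATION macro, FakeAllocator type, and GlobalAlloc variable are hypothetical stand-ins for illustration only, not Scudo code.

// Illustrative sketch, not Scudo source.
#include <cstddef>

#if defined(__has_attribute)
#if __has_attribute(require_constant_initialization)
#define REQUIRE_CONSTANT_INITIALIZATION                                        \
  __attribute__((__require_constant_initialization__))
#else
#define REQUIRE_CONSTANT_INITIALIZATION
#endif
#else
#define REQUIRE_CONSTANT_INITIALIZATION
#endif

struct FakeAllocator {
  // Every member carries an initializer, mirroring the `= 0` / `= {}` /
  // `= nullptr` changes in this patch, so the object can be
  // constant-initialized and no dynamic initializer is needed.
  unsigned Cookie = 0;
  std::size_t AllocatedBytes = 0;
  void *PrimaryBase = nullptr;
};

// Accepted by Clang: the global is constant-initialized. If one of the member
// initializers above is removed, Clang (pre-C++20) typically rejects this
// declaration with "variable does not have a constant initializer", which is
// the build failure the internal_defs.h comment warns about.
REQUIRE_CONSTANT_INITIALIZATION FakeAllocator GlobalAlloc;

int main() { return static_cast<int>(GlobalAlloc.Cookie); }

Under C++20 the same guarantee can be spelled with the 'constinit' specifier, which is the alias the comment added to internal_defs.h refers to.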