diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -40,15 +40,15 @@
   using SizeClassMap = AndroidSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 256MB regions
-  typedef SizeClassAllocator64 Primary;
 #else
   // 256KB regions
-  typedef SizeClassAllocator32 Primary;
+  typedef SizeClassAllocator32 Primary;
 #endif
   // Cache blocks up to 2MB
-  typedef MapAllocator> Secondary;
+  typedef MapAllocator> Secondary;
   template using TSDRegistryT = TSDRegistrySharedT; // Shared, max 2 TSDs.
 };
@@ -57,12 +57,12 @@
   using SizeClassMap = SvelteSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 128MB regions
-  typedef SizeClassAllocator64 Primary;
+  typedef SizeClassAllocator64 Primary;
 #else
   // 64KB regions
-  typedef SizeClassAllocator32 Primary;
+  typedef SizeClassAllocator32 Primary;
 #endif
-  typedef MapAllocator> Secondary;
+  typedef MapAllocator> Secondary;
   template using TSDRegistryT = TSDRegistrySharedT; // Shared, only 1 TSD.
 };
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -32,6 +32,8 @@

 namespace scudo {

+enum class Option { PrimaryReleaseInterval, SecondaryReleaseInterval };
+
 template class Allocator {
 public:
@@ -613,8 +615,16 @@
     return Options.MayReturnNull;
   }

-  // TODO(kostyak): implement this as a "backend" to mallopt.
-  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+  bool setOption(Option O, sptr Value) {
+    if (O == Option::PrimaryReleaseInterval) {
+      Primary.setReleaseToOsIntervalMs(Value);
+      return true;
+    } else if (O == Option::SecondaryReleaseInterval) {
+      Secondary.setReleaseToOsIntervalMs(Value);
+      return true;
+    }
+    return false;
+  }

   // Return the usable size for a given chunk. Technically we lie, as we just
   // report the actual size of a chunk. This is done to counteract code actively
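The combined.h change above replaces the mallopt TODO with an enum-keyed setOption() that forwards the release interval to either the primary or the secondary allocator. Below is a minimal sketch of that dispatch pattern, not the scudo sources: FakePrimary and FakeSecondary are invented stand-ins for illustration only.

```cpp
#include <cstdint>
#include <cstdio>

enum class Option { PrimaryReleaseInterval, SecondaryReleaseInterval };

struct FakePrimary {
  void setReleaseToOsIntervalMs(int32_t Ms) { std::printf("primary: %d ms\n", Ms); }
};
struct FakeSecondary {
  void setReleaseToOsIntervalMs(int32_t Ms) { std::printf("secondary: %d ms\n", Ms); }
};

struct Allocator {
  FakePrimary Primary;
  FakeSecondary Secondary;

  // Returns true when the option is recognized, mirroring the diff's contract.
  bool setOption(Option O, std::intptr_t Value) {
    if (O == Option::PrimaryReleaseInterval) {
      Primary.setReleaseToOsIntervalMs(static_cast<int32_t>(Value));
      return true;
    }
    if (O == Option::SecondaryReleaseInterval) {
      Secondary.setReleaseToOsIntervalMs(static_cast<int32_t>(Value));
      return true;
    }
    return false; // Unknown option: report failure instead of asserting.
  }
};

int main() {
  Allocator A;
  A.setOption(Option::PrimaryReleaseInterval, 1000);
  A.setOption(Option::SecondaryReleaseInterval, 0);
}
```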
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -38,14 +38,16 @@
 // Memory used by this allocator is never unmapped but can be partially
 // reclaimed if the platform allows for it.
-template class SizeClassAllocator32 {
+template class SizeClassAllocator32 {
 public:
   typedef SizeClassMapT SizeClassMap;
   // The bytemap can only track UINT8_MAX - 1 classes.
   static_assert(SizeClassMap::LargestClassId <= (UINT8_MAX - 1), "");
   // Regions should be large enough to hold the largest Block.
   static_assert((1UL << RegionSizeLog) >= SizeClassMap::MaxSize, "");
-  typedef SizeClassAllocator32 ThisT;
+  typedef SizeClassAllocator32 ThisT;
   typedef SizeClassAllocatorLocalCache CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
   static const bool SupportsMemoryTagging = false;
@@ -78,7 +80,10 @@
       Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
                         (getSizeByClassId(I) >= (PageSize / 32));
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    if (ReleaseToOsInterval < 0) {
+      ReleaseToOsInterval = DefaultReleaseToOsIntervalMs;
+    }
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -176,6 +181,10 @@
       getStats(Str, I, 0);
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {
@@ -356,6 +365,10 @@
                 AvailableChunks, Rss >> 10, Sci->ReleaseInfo.RangesReleased);
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);
@@ -374,7 +387,7 @@
     }

     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Sci->ReleaseInfo.LastReleaseAtNs +
@@ -414,7 +427,7 @@
   // through the whole NumRegions.
   uptr MinRegionIndex;
   uptr MaxRegionIndex;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
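The primary allocators now keep the release interval in an atomic_s32 so a setOption() call from one thread cannot race with allocating threads that consult the interval inside releaseToOSMaybe(). Here is a self-contained sketch of that pattern under assumed names, using std::atomic in place of scudo's atomic helpers; it is illustrative only, not the scudo implementation.

```cpp
#include <atomic>
#include <chrono>
#include <cstdint>

class ReleaseGate {
public:
  void setReleaseToOsIntervalMs(int32_t Interval) {
    IntervalMs.store(Interval, std::memory_order_relaxed);
  }
  int32_t getReleaseToOsIntervalMs() const {
    return IntervalMs.load(std::memory_order_relaxed);
  }

  // Mirrors the gating in releaseToOSMaybe(): a negative interval disables
  // periodic release, and a non-forced release is skipped until the interval
  // has elapsed since the last one. Here the timestamp is refreshed whenever
  // a release is allowed.
  bool shouldRelease(uint64_t NowNs, bool Force = false) {
    if (!Force) {
      const int32_t Interval = getReleaseToOsIntervalMs();
      if (Interval < 0)
        return false;
      if (LastReleaseAtNs + static_cast<uint64_t>(Interval) * 1000000 > NowNs)
        return false; // Not enough time has passed since the last release.
    }
    LastReleaseAtNs = NowNs;
    return true;
  }

private:
  std::atomic<int32_t> IntervalMs{-1};
  uint64_t LastReleaseAtNs = 0;
};

int main() {
  ReleaseGate Gate;
  Gate.setReleaseToOsIntervalMs(1000); // Allow a release at most once per second.
  const auto Now = std::chrono::steady_clock::now().time_since_epoch();
  const uint64_t NowNs = static_cast<uint64_t>(
      std::chrono::duration_cast<std::chrono::nanoseconds>(Now).count());
  (void)Gate.shouldRelease(NowNs);
}
```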
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -40,11 +40,13 @@
 // released if the platform allows for it.
 template class SizeClassAllocator64 {
 public:
   typedef SizeClassMapT SizeClassMap;
   typedef SizeClassAllocator64 ThisT;
   typedef SizeClassAllocatorLocalCache CacheT;
@@ -90,7 +92,10 @@
                          (getSizeByClassId(I) >= (PageSize / 32));
       Region->RandState = getRandomU32(&Seed);
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    if (ReleaseToOsInterval < 0) {
+      ReleaseToOsInterval = DefaultReleaseToOsIntervalMs;
+    }
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);

     if (SupportsMemoryTagging)
       UseMemoryTagging = systemSupportsMemoryTagging();
@@ -186,6 +191,10 @@
       getStats(Str, I, 0);
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {
@@ -241,7 +250,7 @@
   uptr PrimaryBase;
   RegionInfo *RegionInfoArray;
   MapPlatformData Data;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   bool UseMemoryTagging;

   RegionInfo *getRegionInfo(uptr ClassId) const {
@@ -375,6 +384,10 @@
                 getRegionBaseByClassId(ClassId));
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);
@@ -394,7 +407,7 @@
     }

     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Region->ReleaseInfo.LastReleaseAtNs +
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -62,7 +62,8 @@
   void releaseToOS() {}
 };

-template
+template
 class MapAllocatorCache {
 public:
   // Fuchsia doesn't allow releasing Secondary blocks yet. Note that 0 length
@@ -71,7 +72,10 @@
   static_assert(!SCUDO_FUCHSIA || MaxEntriesCount == 0U, "");

   void initLinkerInitialized(s32 ReleaseToOsInterval) {
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    if (ReleaseToOsInterval < 0) {
+      ReleaseToOsInterval = DefaultReleaseToOsIntervalMs;
+    }
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -105,11 +109,11 @@
         }
       }
     }
+    s32 Interval;
     if (EmptyCache)
       empty();
-    else if (ReleaseToOsIntervalMs >= 0)
-      releaseOlderThan(Time -
-                       static_cast(ReleaseToOsIntervalMs) * 1000000);
+    else if ((Interval = getReleaseToOsIntervalMs()) >= 0)
+      releaseOlderThan(Time - static_cast(Interval) * 1000000);
     return EntryCached;
   }
@@ -142,6 +146,10 @@
     return MaxEntriesCount != 0U && Size <= MaxEntrySize;
   }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_relaxed);
+  }
+
   void releaseToOS() { releaseOlderThan(UINT64_MAX); }

   void disable() { Mutex.lock(); }
@@ -189,6 +197,10 @@
     }
   }

+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_relaxed);
+  }
+
   struct CachedBlock {
     uptr Block;
     uptr BlockEnd;
@@ -203,7 +215,7 @@
   u32 EntriesCount;
   uptr LargestSize;
   u32 IsFullEvents;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
 };

 template class MapAllocator {
@@ -251,6 +263,10 @@
   static uptr canCache(uptr Size) { return CacheT::canCache(Size); }

+  void setReleaseToOsIntervalMs(s32 Interval) {
+    Cache.setReleaseToOsIntervalMs(Interval);
+  }
+
   void releaseToOS() { Cache.releaseToOS(); }

 private:
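The secondary's MapAllocatorCache follows the same scheme: retrieve() reads the interval with a relaxed load and passes "now minus interval" as the cutoff to releaseOlderThan(). The sketch below illustrates that ageing logic with invented names and a stand-in entry array; it is not the scudo cache.

```cpp
#include <atomic>
#include <cstdint>

struct CachedBlock {
  uint64_t TimeNs = 0; // When the block was returned to the cache.
  bool Valid = false;  // Whether this slot currently holds a block.
};

class SecondaryCache {
public:
  void setReleaseToOsIntervalMs(int32_t Interval) {
    IntervalMs.store(Interval, std::memory_order_relaxed);
  }

  // Called opportunistically (e.g. from the retrieve path): release every
  // cached block that is older than "now minus the configured interval".
  void maybeReleaseStale(uint64_t NowNs) {
    const int32_t Interval = IntervalMs.load(std::memory_order_relaxed);
    if (Interval < 0)
      return; // A negative interval disables cache releases entirely.
    releaseOlderThan(NowNs - static_cast<uint64_t>(Interval) * 1000000);
  }

private:
  void releaseOlderThan(uint64_t CutoffNs) {
    for (CachedBlock &Entry : Entries)
      if (Entry.Valid && Entry.TimeNs < CutoffNs)
        Entry.Valid = false; // Stand-in for releasing the cached pages.
  }

  std::atomic<int32_t> IntervalMs{-1};
  CachedBlock Entries[32];
};

int main() {
  SecondaryCache Cache;
  Cache.setReleaseToOsIntervalMs(1000);
  Cache.maybeReleaseStale(/*NowNs=*/2000000000);
}
```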
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -157,7 +157,24 @@

 INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
   if (param == M_DECAY_TIME) {
-    // TODO(kostyak): set release_to_os_interval_ms accordingly.
+    int secondary_interval;
+    if (SCUDO_ANDROID) {
+      if (value == 0) {
+        // Never go below 1 second for the primary.
+        value = 1000;
+        secondary_interval = 0;
+      } else {
+        value = 1000;
+        secondary_interval = 1000;
+      }
+    } else {
+      secondary_interval = value;
+    }
+
+    SCUDO_ALLOCATOR.setOption(scudo::Option::PrimaryReleaseInterval,
+                              static_cast(value));
+    SCUDO_ALLOCATOR.setOption(scudo::Option::SecondaryReleaseInterval,
+                              static_cast(secondary_interval));
     return 1;
   } else if (param == M_PURGE) {
     SCUDO_ALLOCATOR.releaseToOS();
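Finally, a usage sketch of the wrapper path from the application side, assuming an Android/Bionic <malloc.h> that defines the M_DECAY_TIME and M_PURGE extensions; the fallback macro values below are assumptions for other platforms, where mallopt() simply ignores unknown parameters.

```cpp
#include <malloc.h>

#ifndef M_DECAY_TIME
#define M_DECAY_TIME (-100) // Bionic's value; placeholder elsewhere.
#endif
#ifndef M_PURGE
#define M_PURGE (-101)      // Bionic's value; placeholder elsewhere.
#endif

int main() {
  // Enable delayed reclaiming: with the diff above, on Android this maps to a
  // 1000ms release interval for both the primary and the secondary.
  mallopt(M_DECAY_TIME, 1);

  // ... allocation-heavy phase ...

  // Ask the allocator to return free pages to the OS immediately.
  mallopt(M_PURGE, 0);
  return 0;
}
```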