diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -588,8 +588,9 @@
     return Options.MayReturnNull;
   }
 
-  // TODO(kostyak): implement this as a "backend" to mallopt.
-  bool setOption(UNUSED uptr Option, UNUSED uptr Value) { return false; }
+  void setReleaseToOsIntervalMs(s32 Interval) {
+    Primary.setReleaseToOsIntervalMs(Interval);
+  }
 
   // Return the usable size for a given chunk. Technically we lie, as we just
   // report the actual size of a chunk. This is done to counteract code actively
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -73,11 +73,10 @@
       SizeClassInfo *Sci = getSizeClassInfo(I);
       Sci->RandState = getRandomU32(&Seed);
       // See comment in the 64-bit primary about releasing smaller size classes.
-      Sci->CanRelease = (ReleaseToOsInterval >= 0) &&
-                        (I != SizeClassMap::BatchClassId) &&
+      Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
                         (getSizeByClassId(I) >= (PageSize / 32));
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -175,6 +174,10 @@
       getStats(Str, I, 0);
   }
 
+  void setReleaseToOsIntervalMs(s32 Interval) {
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_release);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {
@@ -363,6 +366,10 @@
                 AvailableChunks, Rss >> 10);
   }
 
+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_acquire);
+  }
+
   NOINLINE uptr releaseToOSMaybe(SizeClassInfo *Sci, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);
@@ -381,7 +388,7 @@
     }
 
     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Sci->ReleaseInfo.LastReleaseAtNs +
@@ -419,7 +426,7 @@
   // through the whole NumRegions.
   uptr MinRegionIndex;
   uptr MaxRegionIndex;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -86,12 +86,11 @@
       // memory accesses which ends up being fairly costly. The current lower
       // limit is mostly arbitrary and based on empirical observations.
       // TODO(kostyak): make the lower limit a runtime option
-      Region->CanRelease = (ReleaseToOsInterval >= 0) &&
-                           (I != SizeClassMap::BatchClassId) &&
+      Region->CanRelease = (I != SizeClassMap::BatchClassId) &&
                            (getSizeByClassId(I) >= (PageSize / 32));
       Region->RandState = getRandomU32(&Seed);
     }
-    ReleaseToOsIntervalMs = ReleaseToOsInterval;
+    setReleaseToOsIntervalMs(ReleaseToOsInterval);
 
     if (SupportsMemoryTagging)
       UseMemoryTagging = systemSupportsMemoryTagging();
@@ -187,6 +186,10 @@
       getStats(Str, I, 0);
   }
 
+  void setReleaseToOsIntervalMs(s32 Interval) {
+    atomic_store(&ReleaseToOsIntervalMs, Interval, memory_order_release);
+  }
+
   uptr releaseToOS() {
     uptr TotalReleasedBytes = 0;
     for (uptr I = 0; I < NumClasses; I++) {
@@ -244,7 +247,7 @@
   uptr PrimaryBase;
   RegionInfo *RegionInfoArray;
   MapPlatformData Data;
-  s32 ReleaseToOsIntervalMs;
+  atomic_s32 ReleaseToOsIntervalMs;
   bool UseMemoryTagging;
 
   RegionInfo *getRegionInfo(uptr ClassId) const {
@@ -378,6 +381,10 @@
                 getRegionBaseByClassId(ClassId));
   }
 
+  s32 getReleaseToOsIntervalMs() {
+    return atomic_load(&ReleaseToOsIntervalMs, memory_order_acquire);
+  }
+
   NOINLINE uptr releaseToOSMaybe(RegionInfo *Region, uptr ClassId,
                                  bool Force = false) {
     const uptr BlockSize = getSizeByClassId(ClassId);
@@ -397,7 +404,7 @@
     }
 
     if (!Force) {
-      const s32 IntervalMs = ReleaseToOsIntervalMs;
+      const s32 IntervalMs = getReleaseToOsIntervalMs();
       if (IntervalMs < 0)
         return 0;
       if (Region->ReleaseInfo.LastReleaseAtNs +
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -154,9 +154,21 @@
                  SCUDO_PREFIX(malloc_enable));
 }
 
-INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, UNUSED int value) {
+INTERFACE WEAK int SCUDO_PREFIX(mallopt)(int param, int value) {
   if (param == M_DECAY_TIME) {
-    // TODO(kostyak): set release_to_os_interval_ms accordingly.
+    if (SCUDO_ANDROID) {
+      switch (value) {
+      case 0:
+        value = 500;
+        break;
+      case 1:
+        value = 5000;
+        break;
+      default:
+        scudo::Printf("Scudo WARNING: invalid decay value (%d)\n", value);
+      }
+    }
+    SCUDO_ALLOCATOR.setReleaseToOsIntervalMs(value);
     return 1;
   } else if (param == M_PURGE) {
     SCUDO_ALLOCATOR.releaseToOS();
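Usage note (illustrative, not part of the patch): a minimal sketch of how a client would exercise the new behavior, assuming Bionic's <malloc.h>, which provides mallopt() along with the M_DECAY_TIME and M_PURGE parameters. With this patch, an Android decay value of 0 maps to a 500 ms release-to-OS interval, 1 maps to 5000 ms, and M_PURGE triggers an immediate releaseToOS().

#include <malloc.h> // Bionic: mallopt(), M_DECAY_TIME, M_PURGE
#include <stdlib.h>

int main(void) {
  // Lengthen the decay window: with this patch, value 1 becomes a
  // 5000 ms release-to-OS interval in the Scudo primary.
  mallopt(M_DECAY_TIME, 1);

  void *Ptr = malloc(1 << 20);
  free(Ptr);

  // Shorten the decay window: value 0 becomes a 500 ms interval.
  mallopt(M_DECAY_TIME, 0);

  // Force an immediate release of cached free memory back to the OS.
  mallopt(M_PURGE, 0);
  return 0;
}

Because ReleaseToOsIntervalMs is now an atomic_s32 read with acquire/release ordering, a mallopt(M_DECAY_TIME, ...) call on one thread can safely race with releaseToOSMaybe() running on another without a data race.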