diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h --- a/compiler-rt/lib/scudo/standalone/primary32.h +++ b/compiler-rt/lib/scudo/standalone/primary32.h @@ -131,6 +131,15 @@ return CompactPtrGroupBase; } + ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) { + const uptr PageSize = getPageSizeCached(); + return BlockSize < PageSize / 16U; + } + ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) { + const uptr PageSize = getPageSizeCached(); + return BlockSize > PageSize; + } + TransferBatch *popBatch(CacheT *C, uptr ClassId) { DCHECK_LT(ClassId, NumClasses); SizeClassInfo *Sci = getSizeClassInfo(ClassId); @@ -769,30 +778,34 @@ MaySkip = true; const bool CheckDensity = - BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll; + isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll; // Releasing smaller blocks is expensive, so we want to make sure that a // significant amount of bytes are free, and that there has been a good // amount of batches pushed to the freelist before attempting to release. - if (CheckDensity) { - if (ReleaseType == ReleaseToOS::Normal && - PushedBytesDelta < Sci->AllocatedUser / 16U) { + if (CheckDensity && ReleaseType == ReleaseToOS::Normal) { + if (PushedBytesDelta < Sci->AllocatedUser / 16U) MaySkip = true; - } } - if (MaySkip && ReleaseType != ReleaseToOS::ForceAll) - return 0; - if (ReleaseType == ReleaseToOS::Normal) { const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs); if (IntervalMs < 0) - return 0; - if (Sci->ReleaseInfo.LastReleaseAtNs + - static_cast<u64>(IntervalMs) * 1000000 > - getMonotonicTimeFast()) { - return 0; // Memory was returned recently. 
+ MaySkip = true; + + const bool ByPassReleaseInterval = + isLargeBlock(BlockSize) && PushedBytesDelta > 8 * PageSize; + if (!MaySkip && !ByPassReleaseInterval) { + if (Sci->ReleaseInfo.LastReleaseAtNs + + static_cast<u64>(IntervalMs) * 1000000 > + getMonotonicTimeFast()) { + // Memory was returned recently. + MaySkip = true; + } } - } + } // if (ReleaseType == ReleaseToOS::Normal) + + if (MaySkip && ReleaseType != ReleaseToOS::ForceAll) + return 0; const uptr First = Sci->MinRegionIndex; const uptr Last = Sci->MaxRegionIndex; @@ -835,7 +848,7 @@ continue; } const uptr PushedBytesDelta = BytesInBG - BG.BytesInBGAtLastCheckpoint; - if (PushedBytesDelta < PageSize) + if (ReleaseType != ReleaseToOS::ForceAll && PushedBytesDelta < PageSize) continue; // Given the randomness property, we try to release the pages only if the diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h --- a/compiler-rt/lib/scudo/standalone/primary64.h +++ b/compiler-rt/lib/scudo/standalone/primary64.h @@ -505,6 +505,10 @@ const uptr PageSize = getPageSizeCached(); return BlockSize < PageSize / 16U; } + ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) { + const uptr PageSize = getPageSizeCached(); + return BlockSize > PageSize; + } // Push the blocks to their batch group. The layout will be like, // @@ -881,26 +885,31 @@ // Releasing smaller blocks is expensive, so we want to make sure that a // significant amount of bytes are free, and that there has been a good // amount of batches pushed to the freelist before attempting to release. 
- if (CheckDensity) { - if (ReleaseType == ReleaseToOS::Normal && - RegionPushedBytesDelta < Region->TryReleaseThreshold) { + if (CheckDensity && ReleaseType == ReleaseToOS::Normal) { + if (RegionPushedBytesDelta < Region->TryReleaseThreshold) { MaySkip = true; } } - if (MaySkip && ReleaseType != ReleaseToOS::ForceAll) - return 0; - if (ReleaseType == ReleaseToOS::Normal) { const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs); if (IntervalMs < 0) - return 0; - if (Region->ReleaseInfo.LastReleaseAtNs + - static_cast<u64>(IntervalMs) * 1000000 > - getMonotonicTimeFast()) { - return 0; // Memory was returned recently. + MaySkip = true; + + const bool ByPassReleaseInterval = + isLargeBlock(BlockSize) && RegionPushedBytesDelta > 8 * PageSize; + if (!MaySkip && !ByPassReleaseInterval) { + if (Region->ReleaseInfo.LastReleaseAtNs + + static_cast<u64>(IntervalMs) * 1000000 > + getMonotonicTimeFast()) { + // Memory was returned recently. + MaySkip = true; + } } - } + } // if (ReleaseType == ReleaseToOS::Normal) + + if (MaySkip && ReleaseType != ReleaseToOS::ForceAll) + return 0; const uptr GroupSize = (1U << GroupSizeLog); const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg; diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp --- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp +++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp @@ -322,7 +322,7 @@ EXPECT_NE(P, nullptr); Cache.deallocate(ClassId, P); Cache.destroy(nullptr); - EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::Force), 0U); + EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::ForceAll), 0U); } SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {