diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -131,6 +131,15 @@
     return CompactPtrGroupBase;
   }
 
+  ALWAYS_INLINE static bool isSmallBlock(uptr BlockSize) {
+    const uptr PageSize = getPageSizeCached();
+    return BlockSize < PageSize / 16U;
+  }
+  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+    const uptr PageSize = getPageSizeCached();
+    return BlockSize > PageSize;
+  }
+
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
@@ -769,31 +778,30 @@
       MaySkip = true;
 
     const bool CheckDensity =
-        BlockSize < PageSize / 16U && ReleaseType != ReleaseToOS::ForceAll;
+        isSmallBlock(BlockSize) && ReleaseType != ReleaseToOS::ForceAll;
     // Releasing smaller blocks is expensive, so we want to make sure that a
     // significant amount of bytes are free, and that there has been a good
     // amount of batches pushed to the freelist before attempting to release.
-    if (CheckDensity) {
-      if (ReleaseType == ReleaseToOS::Normal &&
-          PushedBytesDelta < Sci->AllocatedUser / 16U) {
+    if (CheckDensity && ReleaseType == ReleaseToOS::Normal) {
+      if (PushedBytesDelta < Sci->AllocatedUser / 16U)
         MaySkip = true;
+
+      if (!isLargeBlock(BlockSize)) {
+        const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+        if (IntervalMs < 0)
+          MaySkip = true;
+        if (Sci->ReleaseInfo.LastReleaseAtNs +
+                static_cast<u64>(IntervalMs) * 1000000 >
+            getMonotonicTimeFast()) {
+          // Memory was returned recently.
+          MaySkip = true;
+        }
       }
-    }
+    } // if (CheckDensity && ReleaseType == ReleaseToOS::Normal)
 
     if (MaySkip && ReleaseType != ReleaseToOS::ForceAll)
      return 0;
 
-    if (ReleaseType == ReleaseToOS::Normal) {
-      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-      if (IntervalMs < 0)
-        return 0;
-      if (Sci->ReleaseInfo.LastReleaseAtNs +
-              static_cast<u64>(IntervalMs) * 1000000 >
-          getMonotonicTimeFast()) {
-        return 0; // Memory was returned recently.
-      }
-    }
-
     const uptr First = Sci->MinRegionIndex;
     const uptr Last = Sci->MaxRegionIndex;
     DCHECK_NE(Last, 0U);
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -505,6 +505,10 @@
     const uptr PageSize = getPageSizeCached();
     return BlockSize < PageSize / 16U;
   }
+  ALWAYS_INLINE static bool isLargeBlock(uptr BlockSize) {
+    const uptr PageSize = getPageSizeCached();
+    return BlockSize > PageSize;
+  }
 
   // Push the blocks to their batch group. The layout will be like,
   //
@@ -881,27 +885,27 @@
     // Releasing smaller blocks is expensive, so we want to make sure that a
     // significant amount of bytes are free, and that there has been a good
     // amount of batches pushed to the freelist before attempting to release.
-    if (CheckDensity) {
-      if (ReleaseType == ReleaseToOS::Normal &&
-          RegionPushedBytesDelta < Region->TryReleaseThreshold) {
+    if (CheckDensity && ReleaseType == ReleaseToOS::Normal) {
+      if (RegionPushedBytesDelta < Region->TryReleaseThreshold) {
         MaySkip = true;
       }
-    }
+
+      if (!isLargeBlock(BlockSize)) {
+        const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
+        if (IntervalMs < 0)
+          MaySkip = true;
+        if (Region->ReleaseInfo.LastReleaseAtNs +
+                static_cast<u64>(IntervalMs) * 1000000 >
+            getMonotonicTimeFast()) {
+          // Memory was returned recently.
+          MaySkip = true;
+        }
+      }
+    } // if (CheckDensity && ReleaseType == ReleaseToOS::Normal)
 
     if (MaySkip && ReleaseType != ReleaseToOS::ForceAll)
       return 0;
 
-    if (ReleaseType == ReleaseToOS::Normal) {
-      const s32 IntervalMs = atomic_load_relaxed(&ReleaseToOsIntervalMs);
-      if (IntervalMs < 0)
-        return 0;
-      if (Region->ReleaseInfo.LastReleaseAtNs +
-              static_cast<u64>(IntervalMs) * 1000000 >
-          getMonotonicTimeFast()) {
-        return 0; // Memory was returned recently.
-      }
-    }
-
     const uptr GroupSize = (1U << GroupSizeLog);
     const uptr AllocatedUserEnd = Region->AllocatedUser + Region->RegionBeg;
     const uptr CompactPtrBase = getCompactPtrBaseByClassId(ClassId);
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -322,7 +322,11 @@
   EXPECT_NE(P, nullptr);
   Cache.deallocate(ClassId, P);
   Cache.destroy(nullptr);
-  EXPECT_GT(Allocator->releaseToOS(scudo::ReleaseToOS::Force), 0U);
+  Allocator->releaseToOS(scudo::ReleaseToOS::Force);
+  // Deallocation and Cache destruction may also trigger page release.
+  // Therefore, instead of checking if releaseToOS() returns a non-zero value,
+  // do two explicit page releases and ensure the latter releases nothing.
+  EXPECT_EQ(Allocator->releaseToOS(scudo::ReleaseToOS::Force), 0U);
 }
 
 SCUDO_TYPED_TEST(ScudoPrimaryTest, MemoryGroup) {
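
For readers tracing the control flow, here is a minimal, self-contained sketch of the skip decision in releaseToOSMaybe() after this change. It is illustrative only and not part of the patch: shouldSkipRelease() and all of its parameters are hypothetical stand-ins, the page size is hard-coded, and the real code reads per-region state, loads ReleaseToOsIntervalMs atomically, and has earlier skip conditions above these hunks that the sketch omits.

#include <cstdint>

// Hypothetical stand-ins for Scudo's types; for illustration only.
enum class ReleaseToOS { Normal, Force, ForceAll };
constexpr uint64_t PageSize = 4096; // Scudo uses getPageSizeCached().

inline bool isSmallBlock(uint64_t BlockSize) {
  return BlockSize < PageSize / 16U;
}
inline bool isLargeBlock(uint64_t BlockSize) { return BlockSize > PageSize; }

// Mirrors the post-patch structure: one MaySkip flag, with the release
// interval consulted only for Normal releases of small blocks.
bool shouldSkipRelease(uint64_t BlockSize, ReleaseToOS Type,
                       uint64_t PushedBytesDelta, uint64_t AllocatedUser,
                       int32_t IntervalMs, uint64_t LastReleaseAtNs,
                       uint64_t NowNs) {
  bool MaySkip = false;
  const bool CheckDensity =
      isSmallBlock(BlockSize) && Type != ReleaseToOS::ForceAll;

  if (CheckDensity && Type == ReleaseToOS::Normal) {
    if (PushedBytesDelta < AllocatedUser / 16U)
      MaySkip = true; // Too few bytes freed since the last release.

    if (!isLargeBlock(BlockSize)) { // Always true under CheckDensity.
      if (IntervalMs < 0)
        MaySkip = true; // Interval-based release is disabled.
      else if (LastReleaseAtNs + uint64_t(IntervalMs) * 1000000 > NowNs)
        MaySkip = true; // Memory was returned recently.
    }
  }

  // ForceAll overrides every skip condition.
  return MaySkip && Type != ReleaseToOS::ForceAll;
}

The observable change is that the ReleaseToOsIntervalMs throttle, previously a standalone early return applied to every Normal release, now gates only the small-block density branch, and all skip conditions funnel through the single MaySkip flag so that ForceAll can override them.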