diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -480,8 +480,9 @@
     }
 
     uptr LastBlockInRange = roundDownSlow(ToInRegion - 1, BlockSize);
-    if (LastBlockInRange < FromInRegion)
-      return;
+
+    // Note that LastBlockInRange may be smaller than `FromInRegion` at this
+    // point because the range may contain only one block.
 
     // When the last block sits across `To`, we can't just mark the pages
     // occupied by the last block as all counted. Instead, we increment the
@@ -540,13 +541,18 @@
       // last block
       const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);
       const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;
-      // Only the last page touched by the last block needs to mark the trailing
-      // blocks. If the difference between `RoundedRegionSize` and
+      // If the difference between `RoundedRegionSize` and
       // `TrailingBlockBase` is larger than a page, that implies the reported
       // `RegionSize` may not be accurate.
       DCHECK_LT(RoundedRegionSize - TrailingBlockBase, PageSize);
+
+      // Only the last page touched by the last block needs to mark the
+      // trailing blocks. Note that if the last "pretend" block straddles the
+      // boundary, we still have to count it so that the per-page block
+      // counting stays consistent.
       uptr NumTrailingBlocks =
-          roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) /
+          (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
+           BlockSize - 1) /
          BlockSize;
       if (NumTrailingBlocks > 0) {
         PageMap.incN(RegionIndex, getPageIndex(TrailingBlockBase),
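[Editor's note, not part of the patch] The hunk above counts "pretend" blocks:
once the last real block has been handled, the tail of the region up to
`RoundedRegionSize` is treated as if it were tiled with further blocks, and a
final block that straddles `RoundedRegionSize` is still counted. The
standalone sketch below evaluates the patched `NumTrailingBlocks` expression
for one made-up configuration; the concrete PageSize/BlockSize/RegionSize
values are assumptions chosen for illustration, and roundUp/roundUpSlow copy
scudo's rounding helpers.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Same definitions as scudo's rounding helpers (Boundary must be a power of
// two for roundUp).
constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}
constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
  return ((X + Boundary - 1) / Boundary) * Boundary;
}

int main() {
  const uptr PageSize = 4096;
  const uptr BlockSize = 96; // hypothetical size class
  // A region whose reported size ends 160 bytes short of the second page.
  const uptr RegionSize = 2 * PageSize - 160;                          // 8032
  const uptr RoundedRegionSize = roundUp(RegionSize, PageSize);        // 8192
  // Start of the last block in the region (it may straddle RegionSize).
  const uptr LastBlockInRegion = ((RegionSize - 1) / BlockSize) * BlockSize;
  const uptr TrailingBlockBase = LastBlockInRegion + BlockSize;        // 8064

  // The patched expression: the 128-byte gap up to RoundedRegionSize is
  // covered by two pretend blocks, one fully inside the last page and one
  // straddling the rounded region end.
  const uptr NumTrailingBlocks =
      (roundUpSlow(RoundedRegionSize - TrailingBlockBase, BlockSize) +
       BlockSize - 1) /
      BlockSize;
  printf("TrailingBlockBase=%zu NumTrailingBlocks=%zu\n",
         (size_t)TrailingBlockBase, (size_t)NumTrailingBlocks); // 8064, 2
  return 0;
}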
diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -315,12 +315,13 @@
     const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
     std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = 0; Block + BlockSize <= RoundedRegionSize;
-         Block += BlockSize) {
-      for (scudo::uptr page = Block / PageSize;
-           page <= (Block + BlockSize - 1) / PageSize; ++page) {
-        ASSERT_LT(page, Pages.size());
-        ++Pages[page];
+    for (scudo::uptr Block = 0; Block < RoundedRegionSize; Block += BlockSize) {
+      for (scudo::uptr Page = Block / PageSize;
+           Page <= (Block + BlockSize - 1) / PageSize &&
+           Page < RoundedRegionSize / PageSize;
+           ++Page) {
+        ASSERT_LT(Page, Pages.size());
+        ++Pages[Page];
       }
     }
 
@@ -410,8 +411,6 @@
 template <class SizeClassMap> void testReleasePartialRegion() {
   typedef FreeBatch<SizeClassMap> Batch;
   const scudo::uptr PageSize = scudo::getPageSizeCached();
-  const scudo::uptr ReleaseBase = PageSize;
-  const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
 
   for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
     // In the following, we want to ensure the region includes at least 2 pages
@@ -419,8 +418,11 @@
     // the last block is tricky, so we always test the case that includes the
     // last block.
     const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr ReleaseBase = scudo::roundUp(BlockSize, PageSize);
+    const scudo::uptr BasePageOffset = ReleaseBase / PageSize;
     const scudo::uptr RegionSize =
-        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) * 2, BlockSize) +
+        scudo::roundUpSlow(scudo::roundUp(BlockSize, PageSize) + ReleaseBase,
+                           BlockSize) +
         BlockSize;
     const scudo::uptr RoundedRegionSize = scudo::roundUp(RegionSize, PageSize);
 
@@ -429,7 +431,7 @@
 
     // Skip the blocks in the first page and add the remaining.
     std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
          Block + BlockSize <= RoundedRegionSize; Block += BlockSize) {
       for (scudo::uptr Page = Block / PageSize;
            Page <= (Block + BlockSize - 1) / PageSize; ++Page) {
@@ -444,7 +446,7 @@
     ++Pages.back();
 
     Batch *CurrentBatch = nullptr;
-    for (scudo::uptr Block = scudo::roundUpSlow(PageSize, BlockSize);
+    for (scudo::uptr Block = scudo::roundUpSlow(ReleaseBase, BlockSize);
          Block < RegionSize; Block += BlockSize) {
       if (CurrentBatch == nullptr ||
           CurrentBatch->getCount() == Batch::MaxCount) {
@@ -459,7 +461,7 @@
     auto SkipRegion = [](UNUSED scudo::uptr RegionIndex) { return false; };
     ReleasedPagesRecorder Recorder(ReleaseBase);
     releaseFreeMemoryToOS(Context, Recorder, SkipRegion);
-    const scudo::uptr FirstBlock = scudo::roundUpSlow(PageSize, BlockSize);
+    const scudo::uptr FirstBlock = scudo::roundUpSlow(ReleaseBase, BlockSize);
 
     for (scudo::uptr P = 0; P < RoundedRegionSize; P += PageSize) {
       if (P < FirstBlock) {
@@ -563,6 +565,69 @@
   testReleasePartialRegion<scudo::SvelteSizeClassMap>();
 }
 
+template <class SizeClassMap> void testReleaseRangeWithSingleBlock() {
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+
+  // We want to test that a memory group which contains only a single block
+  // is handled properly. The case looks like:
+  //
+  //             From                     To
+  //               +----------------------+
+  //  +------------+------------+
+  //  |            |            |
+  //  +------------+------------+
+  //                            ^
+  //                        RegionSize
+  //
+  // Note that `From` will be page-aligned.
+  //
+  // If the second-to-last block is aligned at `From`, then we expect all the
+  // pages after `From` to be marked as can-be-released. Otherwise, only the
+  // pages touched by the last block will be marked as can-be-released.
+  for (scudo::uptr I = 1; I <= SizeClassMap::LargestClassId; I++) {
+    const scudo::uptr BlockSize = SizeClassMap::getSizeByClassId(I);
+    const scudo::uptr From = scudo::roundUp(BlockSize, PageSize);
+    const scudo::uptr To =
+        From % BlockSize == 0
+            ? From + BlockSize
+            : scudo::roundDownSlow(From + BlockSize, BlockSize) + BlockSize;
+    const scudo::uptr RoundedRegionSize = scudo::roundUp(To, PageSize);
+
+    std::vector<scudo::uptr> Pages(RoundedRegionSize / PageSize, 0);
+    for (scudo::uptr Block = (To - BlockSize); Block < RoundedRegionSize;
+         Block += BlockSize) {
+      for (scudo::uptr Page = Block / PageSize;
+           Page <= (Block + BlockSize - 1) / PageSize &&
+           Page < RoundedRegionSize / PageSize;
+           ++Page) {
+        ASSERT_LT(Page, Pages.size());
+        ++Pages[Page];
+      }
+    }
+
+    scudo::PageReleaseContext Context(BlockSize, /*NumberOfRegions=*/1U,
+                                      /*ReleaseSize=*/To,
+                                      /*ReleaseBase=*/0U);
+    Context.markRangeAsAllCounted(From, To, /*Base=*/0U, /*RegionIndex=*/0,
+                                  /*RegionSize=*/To);
+
+    for (scudo::uptr Page = 0; Page < RoundedRegionSize; Page += PageSize) {
+      if (Context.PageMap.get(/*Region=*/0U, Page / PageSize) !=
+          Pages[Page / PageSize]) {
+        EXPECT_TRUE(
+            Context.PageMap.isAllCounted(/*Region=*/0U, Page / PageSize));
+      }
+    }
+  } // for each size class
+}
+
+TEST(ScudoReleaseTest, RangeReleaseRegionWithSingleBlock) {
+  testReleaseRangeWithSingleBlock<scudo::DefaultSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::AndroidSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::FuchsiaSizeClassMap>();
+  testReleaseRangeWithSingleBlock<scudo::SvelteSizeClassMap>();
+}
+
 TEST(ScudoReleaseTest, BufferPool) {
   constexpr scudo::uptr StaticBufferCount = SCUDO_WORDSIZE - 1;
   constexpr scudo::uptr StaticBufferSize = 512U;
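[Editor's note, not part of the patch] The new test derives `From` and `To` so
that the released range covers exactly one whole block, possibly preceded by
the tail of a block that straddles `From`, matching the ASCII diagram above.
The standalone sketch below reproduces that arithmetic for one hypothetical
size class; the 48-byte BlockSize and 4096-byte PageSize are assumptions for
illustration, and roundUp/roundDownSlow copy scudo's helpers.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}
constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
  return (X / Boundary) * Boundary;
}

int main() {
  const uptr PageSize = 4096;
  const uptr BlockSize = 48; // hypothetical size class; doesn't divide a page

  // `From` is page-aligned; `To` is the end of the first block whose start
  // is at or after `From`.
  const uptr From = roundUp(BlockSize, PageSize);               // 4096
  const uptr To =
      From % BlockSize == 0
          ? From + BlockSize
          : roundDownSlow(From + BlockSize, BlockSize) + BlockSize; // 4176
  const uptr RoundedRegionSize = roundUp(To, PageSize);         // 8192

  // 4096 % 48 != 0, so the second-to-last block [4080, 4128) straddles
  // `From`, and the single whole block in the range is [4128, 4176): only
  // the pages touched by that block should become releasable.
  printf("From=%zu To=%zu RoundedRegionSize=%zu\n", (size_t)From, (size_t)To,
         (size_t)RoundedRegionSize);
  return 0;
}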