diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -395,14 +395,6 @@
   // Use `SameGroup=true` to indicate that all blocks in the array are from the
   // same group then we will skip checking the group id of each block.
   //
-  // Note that this aims to have a better management of dirty pages, i.e., the
-  // RSS usage won't grow indefinitely. There's an exception that we may not put
-  // a block to its associated group. While populating new blocks, we may have
-  // blocks cross different groups. However, most cases will fall into same
-  // group and they are supposed to be popped soon. In that case, it's not worth
-  // sorting the array with the almost-sorted property. Therefore, we use
-  // `SameGroup=true` instead.
-  //
   // The region mutex needs to be held while calling this method.
   void pushBlocksImpl(CacheT *C, uptr ClassId, SizeClassInfo *Sci,
                       CompactPtrT *Array, u32 Size, bool SameGroup = false)
@@ -537,6 +529,9 @@
     // All the blocks are from the same group, just push without checking group
     // id.
     if (SameGroup) {
+      for (u32 I = 0; I < Size; ++I)
+        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+
       InsertBlocks(Cur, Array, Size);
       return;
     }
@@ -648,11 +643,29 @@
     uptr P = Region + Offset;
     for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
       ShuffleArray[I] = reinterpret_cast<CompactPtrT>(P);
-    // No need to shuffle the batches size class.
-    if (ClassId != SizeClassMap::BatchClassId)
-      shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState);
-    pushBlocksImpl(C, ClassId, Sci, ShuffleArray, NumberOfBlocks,
-                   /*SameGroup=*/true);
+
+    if (ClassId != SizeClassMap::BatchClassId) {
+      u32 N = 1;
+      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+      for (u32 I = 1; I < NumberOfBlocks; I++) {
+        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+          shuffle(ShuffleArray + I - N, N, &Sci->RandState);
+          pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
+                         /*SameGroup=*/true);
+          N = 1;
+          CurGroup = compactPtrGroup(ShuffleArray[I]);
+        } else {
+          ++N;
+        }
+      }
+
+      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Sci->RandState);
+      pushBlocksImpl(C, ClassId, Sci, &ShuffleArray[NumberOfBlocks - N], N,
+                     /*SameGroup=*/true);
+    } else {
+      pushBlocksImpl(C, ClassId, Sci, ShuffleArray, NumberOfBlocks,
+                     /*SameGroup=*/true);
+    }
 
     const uptr AllocatedUser = Size * NumberOfBlocks;
     C->getStats().add(StatFree, AllocatedUser);
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -458,14 +458,6 @@
   // Use `SameGroup=true` to indicate that all blocks in the array are from the
   // same group then we will skip checking the group id of each block.
   //
-  // Note that this aims to have a better management of dirty pages, i.e., the
-  // RSS usage won't grow indefinitely. There's an exception that we may not put
-  // a block to its associated group. While populating new blocks, we may have
-  // blocks cross different groups. However, most cases will fall into same
-  // group and they are supposed to be popped soon. In that case, it's not worth
-  // sorting the array with the almost-sorted property. Therefore, we use
-  // `SameGroup=true` instead.
-  //
   // The region mutex needs to be held while calling this method.
   void pushBlocksImpl(CacheT *C, uptr ClassId, RegionInfo *Region,
                       CompactPtrT *Array, u32 Size, bool SameGroup = false)
@@ -600,6 +592,9 @@
     // All the blocks are from the same group, just push without checking group
    // id.
     if (SameGroup) {
+      for (u32 I = 0; I < Size; ++I)
+        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+
       InsertBlocks(Cur, Array, Size);
       return;
     }
@@ -712,11 +707,29 @@
     uptr P = RegionBeg + Region->AllocatedUser;
     for (u32 I = 0; I < NumberOfBlocks; I++, P += Size)
       ShuffleArray[I] = compactPtrInternal(CompactPtrBase, P);
-    // No need to shuffle the batches size class.
-    if (ClassId != SizeClassMap::BatchClassId)
-      shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState);
-    pushBlocksImpl(C, ClassId, Region, ShuffleArray, NumberOfBlocks,
-                   /*SameGroup=*/true);
+
+    if (ClassId != SizeClassMap::BatchClassId) {
+      u32 N = 1;
+      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+      for (u32 I = 1; I < NumberOfBlocks; I++) {
+        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+          shuffle(ShuffleArray + I - N, N, &Region->RandState);
+          pushBlocksImpl(C, ClassId, Region, ShuffleArray + I - N, N,
+                         /*SameGroup=*/true);
+          N = 1;
+          CurGroup = compactPtrGroup(ShuffleArray[I]);
+        } else {
+          ++N;
+        }
+      }
+
+      shuffle(ShuffleArray + NumberOfBlocks - N, N, &Region->RandState);
+      pushBlocksImpl(C, ClassId, Region, &ShuffleArray[NumberOfBlocks - N], N,
+                     /*SameGroup=*/true);
+    } else {
+      pushBlocksImpl(C, ClassId, Region, ShuffleArray, NumberOfBlocks,
+                     /*SameGroup=*/true);
+    }
 
     const uptr AllocatedUser = Size * NumberOfBlocks;
     C->getStats().add(StatFree, AllocatedUser);
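Note on the populate-free-list hunks in both files: the freshly carved blocks are written into ShuffleArray in ascending address order, and a block's group is derived from its address, so blocks of the same group always form a contiguous run. The new loop shuffles and pushes one run at a time with SameGroup=true, which is what makes the DCHECK added to pushBlocksImpl hold. Below is a minimal standalone sketch of that run-splitting idea, not the scudo code itself: groupOf, pushSameGroup, pushGroupByGroup, the 2^18-byte group size, and the addresses in main are made-up stand-ins for compactPtrGroup, pushBlocksImpl, and the configured GroupSizeLog.

#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <random>
#include <vector>

// Hypothetical stand-ins for scudo's compactPtrGroup() and pushBlocksImpl();
// GroupSizeLog = 18 (256 KiB groups) is an assumed value, not the real config.
constexpr unsigned GroupSizeLog = 18;
static uintptr_t groupOf(uintptr_t Block) { return Block >> GroupSizeLog; }

static void pushSameGroup(const uintptr_t *Run, size_t N) {
  std::printf("push %zu block(s) from group %#zx\n", N,
              static_cast<size_t>(groupOf(Run[0])));
}

// Blocks is in ascending address order, so every group occupies a contiguous
// run: shuffle and push one run at a time, mirroring the loop in the patch.
static void pushGroupByGroup(std::vector<uintptr_t> &Blocks, std::mt19937 &Rng) {
  size_t N = 1;
  uintptr_t CurGroup = groupOf(Blocks[0]);
  for (size_t I = 1; I < Blocks.size(); ++I) {
    if (groupOf(Blocks[I]) != CurGroup) {
      std::shuffle(Blocks.begin() + (I - N), Blocks.begin() + I, Rng);
      pushSameGroup(&Blocks[I - N], N);
      N = 1;
      CurGroup = groupOf(Blocks[I]);
    } else {
      ++N;
    }
  }
  // Flush the trailing run, like the code after the loop in the patch.
  std::shuffle(Blocks.begin() + (Blocks.size() - N), Blocks.end(), Rng);
  pushSameGroup(&Blocks[Blocks.size() - N], N);
}

int main() {
  std::mt19937 Rng(0);
  // Six 128 KiB blocks carved back to back: with 256 KiB groups they split
  // into three runs of two blocks each (addresses are illustrative only).
  std::vector<uintptr_t> Blocks;
  for (uintptr_t P = 0x700000; Blocks.size() < 6; P += 0x20000)
    Blocks.push_back(P);
  pushGroupByGroup(Blocks, Rng);
  return 0;
}

Running the sketch prints three "push 2 block(s)" lines, one per group run, which is the invariant the DCHECK in pushBlocksImpl now relies on.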