diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h --- a/compiler-rt/lib/scudo/standalone/primary32.h +++ b/compiler-rt/lib/scudo/standalone/primary32.h @@ -266,7 +266,7 @@ uptr MapSize = 2 * RegionSize; const uptr MapBase = reinterpret_cast<uptr>( map(nullptr, MapSize, "scudo:primary", MAP_ALLOWNOMEM)); - if (UNLIKELY(!MapBase)) + if (!MapBase) return 0; const uptr MapEnd = MapBase + MapSize; uptr Region = MapBase; @@ -313,29 +313,6 @@ return &SizeClassInfoArray[ClassId]; } - bool populateBatches(CacheT *C, SizeClassInfo *Sci, uptr ClassId, - TransferBatch **CurrentBatch, u32 MaxCount, - void **PointersArray, u32 Count) { - if (ClassId != SizeClassMap::BatchClassId) - shuffle(PointersArray, Count, &Sci->RandState); - TransferBatch *B = *CurrentBatch; - for (uptr I = 0; I < Count; I++) { - if (B && B->getCount() == MaxCount) { - Sci->FreeList.push_back(B); - B = nullptr; - } - if (!B) { - B = C->createBatch(ClassId, PointersArray[I]); - if (UNLIKELY(!B)) - return false; - B->clear(); - } - B->add(PointersArray[I]); - } - *CurrentBatch = B; - return true; - } - NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId, SizeClassInfo *Sci) { uptr Region; @@ -371,38 +348,35 @@ static_cast<u32>((RegionSize - Offset) / Size)); DCHECK_GT(NumberOfBlocks, 0U); - TransferBatch *B = nullptr; constexpr u32 ShuffleArraySize = MaxNumBatches * TransferBatch::MaxNumCached; // Fill the transfer batches and put them in the size-class freelist. We // need to randomize the blocks for security purposes, so we first fill a // local array that we then shuffle before populating the batches. 
void *ShuffleArray[ShuffleArraySize]; - u32 Count = 0; - const uptr AllocatedUser = Size * NumberOfBlocks; - for (uptr I = Region + Offset; I < Region + Offset + AllocatedUser; - I += Size) { - ShuffleArray[Count++] = reinterpret_cast<void *>(I); - if (Count == ShuffleArraySize) { - if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, - ShuffleArray, Count))) - return nullptr; - Count = 0; - } - } - if (Count) { - if (UNLIKELY(!populateBatches(C, Sci, ClassId, &B, MaxCount, ShuffleArray, - Count))) + DCHECK_LE(NumberOfBlocks, ShuffleArraySize); + + uptr P = Region + Offset; + for (u32 I = 0; I < NumberOfBlocks; I++, P += Size) + ShuffleArray[I] = reinterpret_cast<void *>(P); + // No need to shuffle the batches size class. + if (ClassId != SizeClassMap::BatchClassId) + shuffle(ShuffleArray, NumberOfBlocks, &Sci->RandState); + for (u32 I = 0; I < NumberOfBlocks;) { + TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]); + if (UNLIKELY(!B)) return nullptr; - } - DCHECK(B); - if (!Sci->FreeList.empty()) { + const u32 N = Min(MaxCount, NumberOfBlocks - I); + B->setFromArray(&ShuffleArray[I], N); Sci->FreeList.push_back(B); - B = Sci->FreeList.front(); - Sci->FreeList.pop_front(); + I += N; } + TransferBatch *B = Sci->FreeList.front(); + Sci->FreeList.pop_front(); + DCHECK(B); DCHECK_GT(B->getCount(), 0); + const uptr AllocatedUser = Size * NumberOfBlocks; C->getStats().add(StatFree, AllocatedUser); DCHECK_LE(Sci->CurrentRegionAllocated + AllocatedUser, RegionSize); // If there is not enough room in the region currently associated to fit diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h --- a/compiler-rt/lib/scudo/standalone/primary64.h +++ b/compiler-rt/lib/scudo/standalone/primary64.h @@ -320,30 +320,6 @@ return PrimaryBase + (ClassId << RegionSizeLog); } - bool populateBatches(CacheT *C, RegionInfo *Region, uptr ClassId, - TransferBatch **CurrentBatch, u32 MaxCount, - void **PointersArray, u32 Count) { - // No need 
to shuffle the batches size class. - if (ClassId != SizeClassMap::BatchClassId) - shuffle(PointersArray, Count, &Region->RandState); - TransferBatch *B = *CurrentBatch; - for (uptr I = 0; I < Count; I++) { - if (B && B->getCount() == MaxCount) { - Region->FreeList.push_back(B); - B = nullptr; - } - if (!B) { - B = C->createBatch(ClassId, PointersArray[I]); - if (UNLIKELY(!B)) - return false; - B->clear(); - } - B->add(PointersArray[I]); - } - *CurrentBatch = B; - return true; - } - NOINLINE TransferBatch *populateFreeList(CacheT *C, uptr ClassId, RegionInfo *Region) { const uptr Size = getSizeByClassId(ClassId); @@ -353,30 +329,30 @@ const uptr MappedUser = Region->MappedUser; const uptr TotalUserBytes = Region->AllocatedUser + MaxCount * Size; // Map more space for blocks, if necessary. - if (TotalUserBytes > MappedUser) { + if (UNLIKELY(TotalUserBytes > MappedUser)) { // Do the mmap for the user memory. const uptr UserMapSize = roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement); const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId); - if (UNLIKELY(RegionBase + MappedUser + UserMapSize > RegionSize)) { + if (RegionBase + MappedUser + UserMapSize > RegionSize) { if (!Region->Exhausted) { Region->Exhausted = true; ScopedString Str(1024); getStats(&Str); Str.append( - "Scudo OOM: The process has Exhausted %zuM for size class %zu.\n", + "Scudo OOM: The process has exhausted %zuM for size class %zu.\n", RegionSize >> 20, Size); Str.output(); } return nullptr; } - if (UNLIKELY(MappedUser == 0)) + if (MappedUser == 0) Region->Data = Data; - if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser), - UserMapSize, "scudo:primary", - MAP_ALLOWNOMEM | MAP_RESIZABLE | - (useMemoryTagging<Config>(Options.load()) ? MAP_MEMTAG : 0), - &Region->Data))) + if (!map(reinterpret_cast<void *>(RegionBeg + MappedUser), UserMapSize, + "scudo:primary", + MAP_ALLOWNOMEM | MAP_RESIZABLE | + (useMemoryTagging<Config>(Options.load()) ? 
MAP_MEMTAG : 0), + &Region->Data)) return nullptr; Region->MappedUser += UserMapSize; C->getStats().add(StatMapped, UserMapSize); @@ -387,38 +363,34 @@ static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size)); DCHECK_GT(NumberOfBlocks, 0); - TransferBatch *B = nullptr; constexpr u32 ShuffleArraySize = MaxNumBatches * TransferBatch::MaxNumCached; void *ShuffleArray[ShuffleArraySize]; - u32 Count = 0; - const uptr P = RegionBeg + Region->AllocatedUser; - const uptr AllocatedUser = Size * NumberOfBlocks; - for (uptr I = P; I < P + AllocatedUser; I += Size) { - ShuffleArray[Count++] = reinterpret_cast<void *>(I); - if (Count == ShuffleArraySize) { - if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount, - ShuffleArray, Count))) - return nullptr; - Count = 0; - } - } - if (Count) { - if (UNLIKELY(!populateBatches(C, Region, ClassId, &B, MaxCount, - ShuffleArray, Count))) + DCHECK_LE(NumberOfBlocks, ShuffleArraySize); + + uptr P = RegionBeg + Region->AllocatedUser; + for (u32 I = 0; I < NumberOfBlocks; I++, P += Size) + ShuffleArray[I] = reinterpret_cast<void *>(P); + // No need to shuffle the batches size class. + if (ClassId != SizeClassMap::BatchClassId) + shuffle(ShuffleArray, NumberOfBlocks, &Region->RandState); + for (u32 I = 0; I < NumberOfBlocks;) { + TransferBatch *B = C->createBatch(ClassId, ShuffleArray[I]); + if (UNLIKELY(!B)) return nullptr; - } - DCHECK(B); - if (!Region->FreeList.empty()) { + const u32 N = Min(MaxCount, NumberOfBlocks - I); + B->setFromArray(&ShuffleArray[I], N); Region->FreeList.push_back(B); - B = Region->FreeList.front(); - Region->FreeList.pop_front(); + I += N; } + TransferBatch *B = Region->FreeList.front(); + Region->FreeList.pop_front(); + DCHECK(B); DCHECK_GT(B->getCount(), 0); + const uptr AllocatedUser = Size * NumberOfBlocks; C->getStats().add(StatFree, AllocatedUser); Region->AllocatedUser += AllocatedUser; - Region->Exhausted = false; return B; }