diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -70,7 +70,6 @@
       reportError("SizeClassAllocator32 is not supported on Fuchsia");
 
     PossibleRegions.initLinkerInitialized();
-    MinRegionIndex = NumRegions; // MaxRegionIndex is already initialized to 0.
 
     u32 Seed;
     const u64 Time = getMonotonicTime();
@@ -81,6 +80,8 @@
     for (uptr I = 0; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
       Sci->RandState = getRandomU32(&Seed);
+      // Sci->MaxRegionIndex is already initialized to 0.
+      Sci->MinRegionIndex = NumRegions;
       // See comment in the 64-bit primary about releasing smaller size classes.
       Sci->CanRelease = (I != SizeClassMap::BatchClassId) &&
                         (getSizeByClassId(I) >= (PageSize / 32));
@@ -98,7 +99,7 @@
     while (NumberOfStashedRegions > 0)
       unmap(reinterpret_cast<void *>(RegionsStash[--NumberOfStashedRegions]),
             RegionSize);
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
+    for (uptr I = 0; I < NumRegions; I++)
       if (PossibleRegions[I])
         unmap(reinterpret_cast<void *>(I * RegionSize), RegionSize);
     PossibleRegions.unmapTestOnly();
@@ -156,6 +157,14 @@
   }
 
   template <typename F> void iterateOverBlocks(F Callback) {
+    uptr MinRegionIndex = NumRegions, MaxRegionIndex = 0;
+    for (uptr I = 0; I < NumClasses; I++) {
+      SizeClassInfo *Sci = getSizeClassInfo(I);
+      if (Sci->MinRegionIndex < MinRegionIndex)
+        MinRegionIndex = Sci->MinRegionIndex;
+      if (Sci->MaxRegionIndex > MaxRegionIndex)
+        MaxRegionIndex = Sci->MaxRegionIndex;
+    }
     for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++)
       if (PossibleRegions[I] &&
           (PossibleRegions[I] - 1U) != SizeClassMap::BatchClassId) {
@@ -207,18 +216,14 @@
     return TotalReleasedBytes;
   }
 
-  static bool useMemoryTagging(Options Options) {
-    (void)Options;
-    return false;
-  }
+  static bool useMemoryTagging(UNUSED Options Options) { return false; }
   void disableMemoryTagging() {}
 
   const char *getRegionInfoArrayAddress() const { return nullptr; }
   static uptr getRegionInfoArraySize() { return 0; }
 
-  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
-    (void)RegionInfoData;
-    (void)Ptr;
+  static BlockInfo findNearestBlock(UNUSED const char *RegionInfoData,
+                                    UNUSED uptr Ptr) {
     return {};
   }
 
@@ -252,6 +257,10 @@
     bool CanRelease;
     u32 RandState;
     uptr AllocatedUser;
+    // Lowest & highest region index allocated for this size class, to avoid
+    // looping through the whole NumRegions.
+    uptr MinRegionIndex;
+    uptr MaxRegionIndex;
     ReleaseToOsInfo ReleaseInfo;
   };
   static_assert(sizeof(SizeClassInfo) % SCUDO_CACHE_LINE_SIZE == 0, "");
@@ -299,10 +308,11 @@
       Region = allocateRegionSlow();
     if (LIKELY(Region)) {
       const uptr RegionIndex = computeRegionId(Region);
-      if (RegionIndex < MinRegionIndex)
-        MinRegionIndex = RegionIndex;
-      if (RegionIndex > MaxRegionIndex)
-        MaxRegionIndex = RegionIndex;
+      SizeClassInfo *Sci = getSizeClassInfo(ClassId);
+      if (RegionIndex < Sci->MinRegionIndex)
+        Sci->MinRegionIndex = RegionIndex;
+      if (RegionIndex > Sci->MaxRegionIndex)
+        Sci->MaxRegionIndex = RegionIndex;
       PossibleRegions.set(RegionIndex, static_cast<u8>(ClassId + 1U));
     }
     return Region;
@@ -411,7 +421,7 @@
     const uptr BlockSize = getSizeByClassId(ClassId);
     const uptr PageSize = getPageSizeCached();
 
-    CHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
+    DCHECK_GE(Sci->Stats.PoppedBlocks, Sci->Stats.PushedBlocks);
     const uptr BytesInFreeList =
         Sci->AllocatedUser -
         (Sci->Stats.PoppedBlocks - Sci->Stats.PushedBlocks) * BlockSize;
@@ -446,39 +456,27 @@
       }
     }
 
-    DCHECK_GT(MinRegionIndex, 0U);
-    uptr First = 0;
-    for (uptr I = MinRegionIndex; I <= MaxRegionIndex; I++) {
-      if (PossibleRegions[I] - 1U == ClassId) {
-        First = I;
-        break;
-      }
-    }
-    uptr Last = 0;
-    for (uptr I = MaxRegionIndex; I >= MinRegionIndex; I--) {
-      if (PossibleRegions[I] - 1U == ClassId) {
-        Last = I;
-        break;
-      }
-    }
+    const uptr First = Sci->MinRegionIndex;
+    const uptr Last = Sci->MaxRegionIndex;
+    DCHECK_NE(Last, 0U);
+    DCHECK_LE(First, Last);
     uptr TotalReleasedBytes = 0;
+    const uptr Base = First * RegionSize;
+    const uptr NumberOfRegions = Last - First + 1U;
+    ReleaseRecorder Recorder(Base);
     auto SkipRegion = [this, First, ClassId](uptr RegionIndex) {
       return (PossibleRegions[First + RegionIndex] - 1U) != ClassId;
     };
-    if (First && Last) {
-      const uptr Base = First * RegionSize;
-      const uptr NumberOfRegions = Last - First + 1U;
-      ReleaseRecorder Recorder(Base);
-      releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
-                            BlockSize, &Recorder, SkipRegion);
-      if (Recorder.getReleasedRangesCount() > 0) {
-        Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
-        Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
-        Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
-        TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
-      }
+    releaseFreeMemoryToOS(Sci->FreeList, Base, RegionSize, NumberOfRegions,
+                          BlockSize, &Recorder, SkipRegion);
+    if (Recorder.getReleasedRangesCount() > 0) {
+      Sci->ReleaseInfo.PushedBlocksAtLastRelease = Sci->Stats.PushedBlocks;
+      Sci->ReleaseInfo.RangesReleased += Recorder.getReleasedRangesCount();
+      Sci->ReleaseInfo.LastReleasedBytes = Recorder.getReleasedBytes();
+      TotalReleasedBytes += Sci->ReleaseInfo.LastReleasedBytes;
     }
     Sci->ReleaseInfo.LastReleaseAtNs = getMonotonicTime();
+
     return TotalReleasedBytes;
   }
 
@@ -486,10 +484,6 @@
   // Track the regions in use, 0 is unused, otherwise store ClassId + 1.
   ByteMap PossibleRegions;
-  // Keep track of the lowest & highest regions allocated to avoid looping
-  // through the whole NumRegions.
-  uptr MinRegionIndex;
-  uptr MaxRegionIndex;
   atomic_s32 ReleaseToOsIntervalMs;
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.