diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -62,8 +62,8 @@ struct BatchGroup {
   // `Next` is used by IntrusiveList.
   BatchGroup *Next;
-  // The identifier of each group
-  uptr GroupId;
+  // The compact base address of each group
+  uptr CompactPtrGroupBase;
   // Cache value of TransferBatch::getMaxCached()
   u16 MaxCachedPerBatch;
   // Number of blocks pushed into this group. This is an increment-only
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -114,11 +114,14 @@
     return reinterpret_cast<void *>(static_cast<uptr>(CompactPtr));
   }
 
-  uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    return CompactPtr >> GroupSizeLog;
+  uptr compactPtrGroupBase(CompactPtrT CompactPtr) {
+    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
+    return CompactPtr & ~Mask;
   }
-  uptr batchGroupBase(uptr GroupId) { return GroupId << GroupSizeLog; }
+  uptr decompactGroupBase(uptr CompactPtrGroupBase) {
+    return CompactPtrGroupBase;
+  }
 
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
@@ -164,11 +167,12 @@
     // together.
     bool SameGroup = true;
     for (u32 I = 1; I < Size; ++I) {
-      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I]))
+      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I]))
         SameGroup = false;
       CompactPtrT Cur = Array[I];
       u32 J = I;
-      while (J > 0 && compactPtrGroup(Cur) < compactPtrGroup(Array[J - 1])) {
+      while (J > 0 &&
+             compactPtrGroupBase(Cur) < compactPtrGroupBase(Array[J - 1])) {
         Array[J] = Array[J - 1];
         --J;
       }
@@ -350,6 +354,11 @@
     const uptr End = Region + MapSize;
     if (End != MapEnd)
       unmap(reinterpret_cast<void *>(End), MapEnd - End);
+
+    DCHECK_EQ(Region % RegionSize, 0U);
+    static_assert(Config::PrimaryRegionSizeLog == GroupSizeLog,
+                  "Memory group should be the same size as Region");
+
     return Region;
   }
@@ -403,7 +412,7 @@
       REQUIRES(Sci->Mutex) {
     DCHECK_GT(Size, 0U);
 
-    auto CreateGroup = [&](uptr GroupId) {
+    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
       BatchGroup *BG = nullptr;
       TransferBatch *TB = nullptr;
       if (ClassId == SizeClassMap::BatchClassId) {
@@ -462,7 +471,7 @@
         TB->clear();
       }
 
-      BG->GroupId = GroupId;
+      BG->CompactPtrGroupBase = CompactPtrGroupBase;
       // TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
       BG->Batches.push_front(TB);
       BG->PushedBlocks = 0;
@@ -504,7 +513,7 @@
     if (ClassId == SizeClassMap::BatchClassId) {
       if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
-        Cur = CreateGroup(/*GroupId=*/0);
+        Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
         Sci->FreeList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
@@ -515,13 +524,15 @@
     // will be pushed next. `Prev` is the element right before `Cur`.
     BatchGroup *Prev = nullptr;
 
-    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+    while (Cur != nullptr &&
+           compactPtrGroupBase(Array[0]) > Cur->CompactPtrGroupBase) {
       Prev = Cur;
       Cur = Cur->Next;
     }
 
-    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
-      Cur = CreateGroup(compactPtrGroup(Array[0]));
+    if (Cur == nullptr ||
+        compactPtrGroupBase(Array[0]) != Cur->CompactPtrGroupBase) {
+      Cur = CreateGroup(compactPtrGroupBase(Array[0]));
       if (Prev == nullptr)
         Sci->FreeList.push_front(Cur);
       else
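//===----------------------------------------------------------------------===//
// Editor's aside (illustrative sketch, not part of the patch): what the new
// mask-based compactPtrGroupBase() returns compared with the old shift-based
// compactPtrGroup(). The GroupSizeLog value and the pointer below are made-up
// examples, not taken from any real scudo Config.
//===----------------------------------------------------------------------===//
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr GroupSizeLog = 20;       // hypothetical: 1 MiB memory groups
  const uptr CompactPtr = 0x12345678; // fake compact pointer; in primary32 the
                                      // compact pointer is the address itself

  const uptr OldGroupId = CompactPtr >> GroupSizeLog; // an index: 0x123
  const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
  const uptr NewGroupBase = CompactPtr & ~Mask;       // an address: 0x12300000

  // Same information, different representation: the base is just the id
  // scaled back up, so equality and ordering comparisons are unaffected,
  // and primary32's decompactGroupBase() becomes the identity function.
  assert(NewGroupBase == (OldGroupId << GroupSizeLog));
  return 0;
}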
@@ -532,7 +543,7 @@
     // id.
     if (SameGroup) {
       for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroupBase(Array[I]), Cur->CompactPtrGroupBase);
 
       InsertBlocks(Cur, Array, Size);
       return;
@@ -542,17 +553,19 @@
     // push them to their group together.
     u32 Count = 1;
     for (u32 I = 1; I < Size; ++I) {
-      if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
-        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+      if (compactPtrGroupBase(Array[I - 1]) != compactPtrGroupBase(Array[I])) {
+        DCHECK_EQ(compactPtrGroupBase(Array[I - 1]), Cur->CompactPtrGroupBase);
         InsertBlocks(Cur, Array + I - Count, Count);
 
-        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+        while (Cur != nullptr &&
+               compactPtrGroupBase(Array[I]) > Cur->CompactPtrGroupBase) {
          Prev = Cur;
          Cur = Cur->Next;
        }
 
-        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
-          Cur = CreateGroup(compactPtrGroup(Array[I]));
+        if (Cur == nullptr ||
+            compactPtrGroupBase(Array[I]) != Cur->CompactPtrGroupBase) {
+          Cur = CreateGroup(compactPtrGroupBase(Array[I]));
           DCHECK_NE(Prev, nullptr);
           Sci->FreeList.insert(Prev, Cur);
         }
@@ -648,14 +661,14 @@
     if (ClassId != SizeClassMap::BatchClassId) {
       u32 N = 1;
-      uptr CurGroup = compactPtrGroup(ShuffleArray[0]);
+      uptr CurGroup = compactPtrGroupBase(ShuffleArray[0]);
       for (u32 I = 1; I < NumberOfBlocks; I++) {
-        if (UNLIKELY(compactPtrGroup(ShuffleArray[I]) != CurGroup)) {
+        if (UNLIKELY(compactPtrGroupBase(ShuffleArray[I]) != CurGroup)) {
           shuffle(ShuffleArray + I - N, N, &Sci->RandState);
           pushBlocksImpl(C, ClassId, Sci, ShuffleArray + I - N, N,
                          /*SameGroup=*/true);
           N = 1;
-          CurGroup = compactPtrGroup(ShuffleArray[I]);
+          CurGroup = compactPtrGroupBase(ShuffleArray[I]);
         } else {
           ++N;
         }
@@ -744,8 +757,8 @@
     const uptr Base = First * RegionSize;
     const uptr NumberOfRegions = Last - First + 1U;
     const uptr GroupSize = (1U << GroupSizeLog);
-    const uptr CurRegionGroupId =
-        compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
+    const uptr CurGroupBase =
+        compactPtrGroupBase(compactPtr(ClassId, Sci->CurrentRegion));
 
     ReleaseRecorder Recorder(Base);
     PageReleaseContext Context(BlockSize, RegionSize, NumberOfRegions,
@@ -760,9 +773,10 @@
       if (PushedBytesDelta * BlockSize < PageSize)
         continue;
 
-      uptr AllocatedGroupSize = BG.GroupId == CurRegionGroupId
-                                    ? Sci->CurrentRegionAllocated
-                                    : GroupSize;
+      uptr AllocatedGroupSize =
+          decompactGroupBase(BG.CompactPtrGroupBase) == CurGroupBase
+              ? Sci->CurrentRegionAllocated
+              : GroupSize;
       if (AllocatedGroupSize == 0)
         continue;
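//===----------------------------------------------------------------------===//
// Editor's aside (illustrative sketch, not part of the patch): the free list
// stays correctly sorted after this rename because the group base is a
// strictly monotonic function of the old group id (Base == Id << GroupSizeLog),
// so every <, > and == in pushBlocksImpl() behaves identically with either key.
//===----------------------------------------------------------------------===//
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uintptr_t;
  const uptr GroupSizeLog = 20; // hypothetical group size of 1 MiB
  for (uptr IdA = 0; IdA < 8; ++IdA) {
    for (uptr IdB = 0; IdB < 8; ++IdB) {
      const uptr BaseA = IdA << GroupSizeLog;
      const uptr BaseB = IdB << GroupSizeLog;
      // Shifting preserves order, so sorting by base == sorting by id.
      assert((IdA < IdB) == (BaseA < BaseB));
      assert((IdA == IdB) == (BaseA == BaseB));
    }
  }
  return 0;
}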
@@ -791,15 +805,16 @@
       // `CurrentRegionAllocated` of the current region. This exception
       // excludes the chance of doing range marking for the current region.
       const bool CanDoRangeMark =
-          GroupSize == RegionSize && BG.GroupId != CurRegionGroupId;
+          GroupSize == RegionSize &&
+          decompactGroupBase(BG.CompactPtrGroupBase) != CurGroupBase;
 
       if (CanDoRangeMark && NumBlocks == MaxContainedBlocks) {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
+            DCHECK_EQ(compactPtrGroupBase(It.get(I)), BG.CompactPtrGroupBase);
 
-        const uptr From = batchGroupBase(BG.GroupId);
-        const uptr To = batchGroupBase(BG.GroupId) + AllocatedGroupSize;
+        const uptr From = decompactGroupBase(BG.CompactPtrGroupBase);
+        const uptr To = From + AllocatedGroupSize;
         Context.markRangeAsAllCounted(From, To, Base);
       } else {
         if (CanDoRangeMark)
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -47,6 +47,7 @@
   typedef typename Config::PrimaryCompactPtrT CompactPtrT;
   static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
   static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
+  static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
   typedef typename Config::SizeClassMap SizeClassMap;
   typedef SizeClassAllocator64<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -299,9 +300,6 @@
   static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
 
   uptr getCompactPtrBaseByClassId(uptr ClassId) {
-    // If we are not compacting pointers, base everything off of 0.
-    if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
-      return 0;
     return getRegionInfo(ClassId)->RegionBeg;
   }
@@ -435,10 +433,12 @@
   }
 
   static uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
+    const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
+    return static_cast<uptr>(CompactPtr) & ~Mask;
   }
-  static uptr batchGroupBase(uptr Base, uptr GroupId) {
-    return (GroupId << GroupSizeLog) + Base;
+  static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBase) {
+    DCHECK_EQ(CompactPtrGroupBase % (static_cast<uptr>(1) << GroupScale), 0U);
+    return Base + (CompactPtrGroupBase << CompactPtrScale);
   }
 
   // Push the blocks to their batch group. The layout will be like,
@@ -464,7 +464,7 @@
       REQUIRES(Region->Mutex) {
     DCHECK_GT(Size, 0U);
 
-    auto CreateGroup = [&](uptr GroupId) {
+    auto CreateGroup = [&](uptr CompactPtrGroupBase) {
      BatchGroup *BG = nullptr;
      TransferBatch *TB = nullptr;
      if (ClassId == SizeClassMap::BatchClassId) {
@@ -523,7 +523,7 @@
         TB->clear();
       }
 
-      BG->GroupId = GroupId;
+      BG->CompactPtrGroupBase = CompactPtrGroupBase;
       // TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
       BG->Batches.push_front(TB);
       BG->PushedBlocks = 0;
@@ -565,7 +565,7 @@
     if (ClassId == SizeClassMap::BatchClassId) {
       if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
-        Cur = CreateGroup(/*GroupId=*/0);
+        Cur = CreateGroup(/*CompactPtrGroupBase=*/0);
         Region->FreeList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
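//===----------------------------------------------------------------------===//
// Editor's aside (illustrative sketch, not part of the patch): round-tripping
// a group base through the 64-bit compact-pointer encoding, mirroring the new
// compactPtrGroup()/decompactGroupBase() pair. The scale, group size, base and
// address below are made-up stand-ins for Config::PrimaryCompactPtrScale,
// Config::PrimaryGroupSizeLog and RegionBeg.
//===----------------------------------------------------------------------===//
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uint64_t;
  const uptr CompactPtrScale = 4; // hypothetical: pointers compacted by 16
  const uptr GroupSizeLog = 20;   // hypothetical: 1 MiB groups
  const uptr GroupScale = GroupSizeLog - CompactPtrScale;

  const uptr Base = 0x7000000000;      // fake CompactPtrBase, GroupSize-aligned
  const uptr Addr = Base + 0x123456A0; // a fake block inside the region

  // Compaction: drop CompactPtrScale low bits relative to Base.
  const uptr CompactPtr = (Addr - Base) >> CompactPtrScale;

  // compactPtrGroup(): clear the low GroupScale bits to keep the compact
  // group *base* instead of shifting them out into an id.
  const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
  const uptr CompactGroupBase = CompactPtr & ~Mask;

  // decompactGroupBase(): scale back up and rebase. Because the fake Base is
  // itself GroupSize-aligned here, the result equals the GroupSize-aligned
  // address of the group containing Addr.
  const uptr GroupBase = Base + (CompactGroupBase << CompactPtrScale);
  assert(GroupBase == (Addr & ~((static_cast<uptr>(1) << GroupSizeLog) - 1)));
  return 0;
}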
@@ -576,12 +576,14 @@
     // will be pushed next. `Prev` is the element right before `Cur`.
     BatchGroup *Prev = nullptr;
 
-    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+    while (Cur != nullptr &&
+           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBase) {
       Prev = Cur;
       Cur = Cur->Next;
     }
 
-    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
+    if (Cur == nullptr ||
+        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBase) {
       Cur = CreateGroup(compactPtrGroup(Array[0]));
       if (Prev == nullptr)
         Region->FreeList.push_front(Cur);
       else
@@ -593,7 +595,7 @@
     // id.
     if (SameGroup) {
       for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBase);
 
       InsertBlocks(Cur, Array, Size);
       return;
@@ -604,15 +606,17 @@
     u32 Count = 1;
     for (u32 I = 1; I < Size; ++I) {
       if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
-        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBase);
         InsertBlocks(Cur, Array + I - Count, Count);
 
-        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+        while (Cur != nullptr &&
+               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBase) {
           Prev = Cur;
           Cur = Cur->Next;
         }
 
-        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
+        if (Cur == nullptr ||
+            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBase) {
           Cur = CreateGroup(compactPtrGroup(Array[I]));
           DCHECK_NE(Prev, nullptr);
           Region->FreeList.insert(Prev, Cur);
@@ -823,40 +827,27 @@
         continue;
       }
 
-      // Group boundary does not necessarily have the same alignment as Region.
-      // It may sit across a Region boundary. Which means that we may have the
-      // following two cases,
-      //
-      // 1. Group boundary sits before RegionBeg.
-      //
-      //                (BatchGroupBase)
-      // batchGroupBase  RegionBeg       BatchGroupEnd
-      //        |            |                |
-      //        v            v                v
-      //        +------------+----------------+
-      //         \                           /
-      //          ------   GroupSize   ------
-      //
-      // 2. Group boundary sits after RegionBeg.
+      // Group boundary is always GroupSize-aligned from CompactPtr base. The
+      // layout of memory groups is like,
       //
-      //      (BatchGroupBase)
-      // RegionBeg  batchGroupBase               BatchGroupEnd
-      //     |           |                             |
-      //     v           v                             v
-      //     +-----------+-----------------------------+
-      //      \                                       /
-      //       ------           GroupSize      ------
+      //     (CompactPtrBase)
+      // #1 CompactPtrGroupBase   #2 CompactPtrGroupBase      ...
+      //           |                       |                   |
+      //           v                       v                   v
+      //           +-----------------------+-----------------------+
+      //            \                     / \                     /
+      //             ---   GroupSize   ---   ---   GroupSize   ---
       //
-      // Note that in the first case, the group range before RegionBeg is never
-      // used. Therefore, while calculating the used group size, we should
-      // exclude that part to get the correct size.
+      // After decompacting the CompactPtrGroupBase, we expect the alignment
+      // property is held as well.
       const uptr BatchGroupBase =
-          Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
+          decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBase);
+      DCHECK_LE(Region->RegionBeg, BatchGroupBase);
       DCHECK_GE(AllocatedUserEnd, BatchGroupBase);
-      const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
+      DCHECK_EQ((Region->RegionBeg - BatchGroupBase) % GroupSize, 0U);
+      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBase
+                                          ? GroupSize
                                           : AllocatedUserEnd - BatchGroupBase;
       if (AllocatedGroupSize == 0) {
         Prev = BG;
         continue;
       }
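//===----------------------------------------------------------------------===//
// Editor's aside (illustrative sketch, not part of the patch): with a
// base-aligned group, the ternary `AllocatedUserEnd >= BatchGroupEnd
// ? GroupSize : AllocatedUserEnd - BatchGroupBase` is simply
// Min(BatchGroupEnd, AllocatedUserEnd) - BatchGroupBase: only the last,
// partially allocated group gets clamped. All numbers below are made up.
//===----------------------------------------------------------------------===//
#include <algorithm>
#include <cassert>
#include <cstdint>

int main() {
  using uptr = uint64_t;
  const uptr GroupSize = 1 << 20;            // hypothetical 1 MiB groups
  const uptr BatchGroupBase = 5 * GroupSize; // base of the last mapped group
  const uptr AllocatedUserEnd = BatchGroupBase + 0x400; // 1 KiB into the group

  const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
  const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
                                      ? GroupSize
                                      : AllocatedUserEnd - BatchGroupBase;
  assert(AllocatedGroupSize ==
         std::min(BatchGroupEnd, AllocatedUserEnd) - BatchGroupBase);
  assert(AllocatedGroupSize == 0x400); // only 1 KiB of this group is in use
  return 0;
}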
@@ -928,11 +919,11 @@
     if (GroupToRelease.empty())
       return 0;
 
-    const uptr ReleaseBase =
-        Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
-            Region->RegionBeg);
+    const uptr ReleaseBase = decompactGroupBase(
+        CompactPtrBase, GroupToRelease.front()->CompactPtrGroupBase);
     const uptr LastGroupEnd =
-        Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
+        Min(decompactGroupBase(CompactPtrBase,
+                               GroupToRelease.back()->CompactPtrGroupBase) +
                 GroupSize,
             AllocatedUserEnd);
     // The last block may straddle the group boundary. Rounding up to BlockSize
@@ -950,15 +941,11 @@
     for (BatchGroup &BG : GroupToRelease) {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
 
-      // TODO(chiahungduan): Replace GroupId with BatchGroupBase to simplify the
-      // calculation here and above (where we determine the set of groups to
-      // release).
       const uptr BatchGroupBase =
-          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
-      const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBase);
+      const uptr BatchGroupEnd = BatchGroupBase + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBase
+                                          ? GroupSize
                                           : AllocatedUserEnd - BatchGroupBase;
       const uptr BatchGroupUsedEnd = BatchGroupBase + AllocatedGroupSize;
       const bool BlockAlignedWithUsedEnd =
@@ -974,7 +961,7 @@
       if (NumBlocks == MaxContainedBlocks) {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
+            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBase);
 
         Context.markRangeAsAllCounted(BatchGroupBase, BatchGroupUsedEnd,
                                       Region->RegionBeg);
@@ -1009,9 +996,10 @@
         break;
       }
 
-      DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
+      DCHECK_NE(BG->CompactPtrGroupBase,
+                GroupToRelease.front()->CompactPtrGroupBase);
 
-      if (BG->GroupId < GroupToRelease.front()->GroupId)
+      if (BG->CompactPtrGroupBase < GroupToRelease.front()->CompactPtrGroupBase)
         continue;
 
       BatchGroup *Cur = GroupToRelease.front();
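//===----------------------------------------------------------------------===//
// Editor's aside (illustrative sketch, not part of the patch): the final hunk
// re-inserts the surviving groups back into Region->FreeList; since both lists
// are sorted by CompactPtrGroupBase, this amounts to a sorted-list merge.
// FakeBatchGroup and std::list below are simplified stand-ins for scudo's
// intrusive BatchGroup list, not the real types.
//===----------------------------------------------------------------------===//
#include <cassert>
#include <cstdint>
#include <list>

struct FakeBatchGroup {
  uint64_t CompactPtrGroupBase;
};

int main() {
  // Both lists are kept sorted by CompactPtrGroupBase, mirroring the
  // invariant that pushBlocksImpl() maintains for the free list.
  std::list<FakeBatchGroup> FreeList{{0x100000}, {0x300000}};
  std::list<FakeBatchGroup> GroupToRelease{{0x200000}, {0x400000}};

  // Sorted merge: each group goes back in front of the first free-list group
  // with a larger base, keeping the overall order intact.
  FreeList.merge(GroupToRelease,
                 [](const FakeBatchGroup &A, const FakeBatchGroup &B) {
                   return A.CompactPtrGroupBase < B.CompactPtrGroupBase;
                 });

  uint64_t Prev = 0;
  for (const FakeBatchGroup &BG : FreeList) {
    assert(Prev < BG.CompactPtrGroupBase); // still strictly ordered
    Prev = BG.CompactPtrGroupBase;
  }
  return 0;
}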