diff --git a/compiler-rt/lib/scudo/standalone/local_cache.h b/compiler-rt/lib/scudo/standalone/local_cache.h
--- a/compiler-rt/lib/scudo/standalone/local_cache.h
+++ b/compiler-rt/lib/scudo/standalone/local_cache.h
@@ -62,8 +62,8 @@
   struct BatchGroup {
     // `Next` is used by IntrusiveList.
     BatchGroup *Next;
-    // The identifier of each group
-    uptr GroupId;
+    // The compact begin address of each group
+    uptr CompactPtrGroupBeg;
     // Cache value of TransferBatch::getMaxCached()
     u16 MaxCachedPerBatch;
     // Number of blocks pushed into this group. This is an increment-only
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -115,10 +115,13 @@
   }
 
   uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    return CompactPtr >> GroupSizeLog;
+    const uptr Mask = (static_cast<uptr>(1) << GroupSizeLog) - 1;
+    return CompactPtr & ~Mask;
   }
 
-  uptr batchGroupBase(uptr GroupId) { return GroupId << GroupSizeLog; }
+  uptr decompactGroupBase(uptr CompactPtrGroupBeg) {
+    return CompactPtrGroupBeg;
+  }
 
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
@@ -350,6 +353,12 @@
     const uptr End = Region + MapSize;
     if (End != MapEnd)
       unmap(reinterpret_cast<void *>(End), MapEnd - End);
+
+    DCHECK_EQ(Region % RegionSize, 0U);
+    static_assert(Config::PrimaryRegionSizeLog >= GroupSizeLog,
+                  "Memory group should be aligned with RegionBeg (which is "
+                  "aligned with RegionSize)");
+
     return Region;
   }
 
@@ -403,7 +412,7 @@
       REQUIRES(Sci->Mutex) {
     DCHECK_GT(Size, 0U);
 
-    auto CreateGroup = [&](uptr GroupId) {
+    auto CreateGroup = [&](uptr CompactPtrGroupBeg) {
       BatchGroup *BG = nullptr;
       TransferBatch *TB = nullptr;
       if (ClassId == SizeClassMap::BatchClassId) {
@@ -462,7 +471,7 @@
         TB->clear();
       }
 
-      BG->GroupId = GroupId;
+      BG->CompactPtrGroupBeg = CompactPtrGroupBeg;
       // TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
       BG->Batches.push_front(TB);
       BG->PushedBlocks = 0;
@@ -504,7 +513,7 @@
     if (ClassId == SizeClassMap::BatchClassId) {
      if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
-        Cur = CreateGroup(/*GroupId=*/0);
+        Cur = CreateGroup(/*CompactPtrGroupBeg=*/0);
         Sci->FreeList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
@@ -515,12 +524,14 @@
     // will be pushed next. `Prev` is the element right before `Cur`.
     BatchGroup *Prev = nullptr;
 
-    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+    while (Cur != nullptr &&
+           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBeg) {
       Prev = Cur;
       Cur = Cur->Next;
     }
 
-    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
+    if (Cur == nullptr ||
+        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBeg) {
       Cur = CreateGroup(compactPtrGroup(Array[0]));
       if (Prev == nullptr)
         Sci->FreeList.push_front(Cur);
@@ -532,7 +543,7 @@
     // id.
     if (SameGroup) {
       for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBeg);
 
       InsertBlocks(Cur, Array, Size);
       return;
@@ -543,15 +554,17 @@
     u32 Count = 1;
     for (u32 I = 1; I < Size; ++I) {
       if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
-        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBeg);
         InsertBlocks(Cur, Array + I - Count, Count);
 
-        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+        while (Cur != nullptr &&
+               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBeg) {
           Prev = Cur;
           Cur = Cur->Next;
         }
 
-        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
+        if (Cur == nullptr ||
+            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBeg) {
           Cur = CreateGroup(compactPtrGroup(Array[I]));
           DCHECK_NE(Prev, nullptr);
           Sci->FreeList.insert(Prev, Cur);
@@ -745,7 +758,7 @@
     const uptr Base = First * RegionSize;
     const uptr NumberOfRegions = Last - First + 1U;
     const uptr GroupSize = (1U << GroupSizeLog);
-    const uptr CurRegionGroupId =
+    const uptr CurGroupBeg =
         compactPtrGroup(compactPtr(ClassId, Sci->CurrentRegion));
 
     ReleaseRecorder Recorder(Base);
@@ -761,7 +774,7 @@
       if (PushedBytesDelta * BlockSize < PageSize)
         continue;
 
-      uptr AllocatedGroupSize = BG.GroupId == CurRegionGroupId
+      uptr AllocatedGroupSize = BG.CompactPtrGroupBeg == CurGroupBeg
                                     ? Sci->CurrentRegionAllocated
                                     : GroupSize;
       if (AllocatedGroupSize == 0)
         continue;
@@ -792,7 +805,7 @@
      // `CurrentRegionAllocated` of the current region. This exception excludes
       // the chance of doing range marking for the current region.
       const bool CanDoRangeMark =
-          GroupSize == RegionSize && BG.GroupId != CurRegionGroupId;
+          GroupSize == RegionSize && BG.CompactPtrGroupBeg != CurGroupBeg;
 
       if (!CanDoRangeMark || NumBlocks != MaxContainedBlocks) {
         if (CanDoRangeMark)
@@ -805,10 +818,10 @@
       } else {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
+            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBeg);
 
-        const uptr From = batchGroupBase(BG.GroupId);
-        const uptr To = batchGroupBase(BG.GroupId) + AllocatedGroupSize;
+        const uptr From = decompactGroupBase(BG.CompactPtrGroupBeg);
+        const uptr To = From + AllocatedGroupSize;
         Context.markRangeAsAllCounted(From, To, Base);
       }
     }
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -47,6 +47,7 @@
   typedef typename Config::PrimaryCompactPtrT CompactPtrT;
   static const uptr CompactPtrScale = Config::PrimaryCompactPtrScale;
   static const uptr GroupSizeLog = Config::PrimaryGroupSizeLog;
+  static const uptr GroupScale = GroupSizeLog - CompactPtrScale;
   typedef typename Config::SizeClassMap SizeClassMap;
   typedef SizeClassAllocator64<Config> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
@@ -299,9 +300,6 @@
   static uptr getRegionInfoArraySize() { return sizeof(RegionInfoArray); }
 
   uptr getCompactPtrBaseByClassId(uptr ClassId) {
-    // If we are not compacting pointers, base everything off of 0.
-    if (sizeof(CompactPtrT) == sizeof(uptr) && CompactPtrScale == 0)
-      return 0;
     return getRegionInfo(ClassId)->RegionBeg;
   }
 
@@ -435,10 +433,12 @@
   }
 
   static uptr compactPtrGroup(CompactPtrT CompactPtr) {
-    return static_cast<uptr>(CompactPtr) >> (GroupSizeLog - CompactPtrScale);
+    const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
+    return static_cast<uptr>(CompactPtr) & ~Mask;
   }
-  static uptr batchGroupBase(uptr Base, uptr GroupId) {
-    return (GroupId << GroupSizeLog) + Base;
+  static uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBeg) {
+    DCHECK_EQ(CompactPtrGroupBeg % (static_cast<uptr>(1) << (GroupScale)), 0U);
+    return Base + (CompactPtrGroupBeg << CompactPtrScale);
   }
 
   // Push the blocks to their batch group. The layout will be like,
@@ -464,7 +464,7 @@
       REQUIRES(Region->Mutex) {
     DCHECK_GT(Size, 0U);
 
-    auto CreateGroup = [&](uptr GroupId) {
+    auto CreateGroup = [&](uptr CompactPtrGroupBeg) {
       BatchGroup *BG = nullptr;
       TransferBatch *TB = nullptr;
       if (ClassId == SizeClassMap::BatchClassId) {
@@ -523,7 +523,7 @@
         TB->clear();
       }
 
-      BG->GroupId = GroupId;
+      BG->CompactPtrGroupBeg = CompactPtrGroupBeg;
       // TODO(chiahungduan): Avoid the use of push_back() in `Batches`.
       BG->Batches.push_front(TB);
       BG->PushedBlocks = 0;
@@ -565,7 +565,7 @@
     if (ClassId == SizeClassMap::BatchClassId) {
       if (Cur == nullptr) {
         // Don't need to classify BatchClassId.
-        Cur = CreateGroup(/*GroupId=*/0);
+        Cur = CreateGroup(/*CompactPtrGroupBeg=*/0);
         Region->FreeList.push_front(Cur);
       }
       InsertBlocks(Cur, Array, Size);
@@ -576,12 +576,14 @@
     // will be pushed next. `Prev` is the element right before `Cur`.
     BatchGroup *Prev = nullptr;
 
-    while (Cur != nullptr && compactPtrGroup(Array[0]) > Cur->GroupId) {
+    while (Cur != nullptr &&
+           compactPtrGroup(Array[0]) > Cur->CompactPtrGroupBeg) {
       Prev = Cur;
       Cur = Cur->Next;
     }
 
-    if (Cur == nullptr || compactPtrGroup(Array[0]) != Cur->GroupId) {
+    if (Cur == nullptr ||
+        compactPtrGroup(Array[0]) != Cur->CompactPtrGroupBeg) {
       Cur = CreateGroup(compactPtrGroup(Array[0]));
       if (Prev == nullptr)
         Region->FreeList.push_front(Cur);
@@ -593,7 +595,7 @@
     // id.
     if (SameGroup) {
       for (u32 I = 0; I < Size; ++I)
-        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I]), Cur->CompactPtrGroupBeg);
 
       InsertBlocks(Cur, Array, Size);
       return;
@@ -604,15 +606,17 @@
     u32 Count = 1;
     for (u32 I = 1; I < Size; ++I) {
       if (compactPtrGroup(Array[I - 1]) != compactPtrGroup(Array[I])) {
-        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->GroupId);
+        DCHECK_EQ(compactPtrGroup(Array[I - 1]), Cur->CompactPtrGroupBeg);
         InsertBlocks(Cur, Array + I - Count, Count);
 
-        while (Cur != nullptr && compactPtrGroup(Array[I]) > Cur->GroupId) {
+        while (Cur != nullptr &&
+               compactPtrGroup(Array[I]) > Cur->CompactPtrGroupBeg) {
           Prev = Cur;
           Cur = Cur->Next;
         }
 
-        if (Cur == nullptr || compactPtrGroup(Array[I]) != Cur->GroupId) {
+        if (Cur == nullptr ||
+            compactPtrGroup(Array[I]) != Cur->CompactPtrGroupBeg) {
           Cur = CreateGroup(compactPtrGroup(Array[I]));
           DCHECK_NE(Prev, nullptr);
           Region->FreeList.insert(Prev, Cur);
@@ -823,40 +827,27 @@
         continue;
       }
 
-      // Group boundary does not necessarily have the same alignment as Region.
-      // It may sit across a Region boundary. Which means that we may have the
-      // following two cases,
-      //
-      // 1. Group boundary sits before RegionBeg.
-      //
-      //   (BatchGroupBeg)
-      //   batchGroupBase  RegionBeg      BatchGroupEnd
-      //          |            |                |
-      //          v            v                v
-      //          +------------+----------------+
-      //           \                           /
-      //             ------ GroupSize ------
-      //
-      // 2. Group boundary sits after RegionBeg.
+      // Group boundary is always GroupSize-aligned from CompactPtr base. The
+      // layout of memory groups is like,
       //
-      //            (BatchGroupBeg)
-      //  RegionBeg  batchGroupBase               BatchGroupEnd
-      //      |           |                            |
-      //      v           v                            v
-      //      +-----------+-----------------------------+
-      //       \                                       /
-      //                ------ GroupSize ------
+      //      (CompactPtrBase)
+      //      #1 CompactPtrGroupBeg   #2 CompactPtrGroupBeg    ...
+      //      |                       |                       |
+      //      v                       v                       v
+      //      +-----------------------+-----------------------+
+      //       \                     / \                     /
+      //         --- GroupSize ---       --- GroupSize ---
       //
-      // Note that in the first case, the group range before RegionBeg is never
-      // used. Therefore, while calculating the used group size, we should
-      // exclude that part to get the correct size.
+      // After decompacting the CompactPtrGroupBeg, we expect the alignment
+      // property to hold as well.
       const uptr BatchGroupBeg =
-          Max(batchGroupBase(CompactPtrBase, BG->GroupId), Region->RegionBeg);
+          decompactGroupBase(CompactPtrBase, BG->CompactPtrGroupBeg);
+      DCHECK_LE(Region->RegionBeg, BatchGroupBeg);
       DCHECK_GE(AllocatedUserEnd, BatchGroupBeg);
-      const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG->GroupId) + GroupSize;
+      DCHECK_EQ((Region->RegionBeg - BatchGroupBeg) % GroupSize, 0U);
+      const uptr BatchGroupEnd = BatchGroupBeg + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBeg
+                                          ? GroupSize
                                           : AllocatedUserEnd - BatchGroupBeg;
       if (AllocatedGroupSize == 0) {
         Prev = BG, BG = BG->Next;
@@ -893,11 +884,11 @@
     if (GroupToRelease.empty())
       return 0;
 
-    const uptr ReleaseBeg =
-        Max(batchGroupBase(CompactPtrBase, GroupToRelease.front()->GroupId),
-            Region->RegionBeg);
+    const uptr ReleaseBeg = decompactGroupBase(
+        CompactPtrBase, GroupToRelease.front()->CompactPtrGroupBeg);
     const uptr LastGroupEnd =
-        Min(batchGroupBase(CompactPtrBase, GroupToRelease.back()->GroupId) +
+        Min(decompactGroupBase(CompactPtrBase,
+                               GroupToRelease.back()->CompactPtrGroupBeg) +
                 GroupSize,
             AllocatedUserEnd);
     // The last block may straddle the group boundary. Rounding up to BlockSize
@@ -915,15 +906,11 @@
     for (BatchGroup &BG : GroupToRelease) {
       BG.PushedBlocksAtLastCheckpoint = BG.PushedBlocks;
 
-      // TODO(chiahungduan): Replace GroupId with BatchGroupBeg to simplify the
-      // calculation here and above (where we determine the set of groups to
-      // release).
       const uptr BatchGroupBeg =
-          Max(batchGroupBase(CompactPtrBase, BG.GroupId), Region->RegionBeg);
-      const uptr BatchGroupEnd =
-          batchGroupBase(CompactPtrBase, BG.GroupId) + GroupSize;
+          decompactGroupBase(CompactPtrBase, BG.CompactPtrGroupBeg);
+      const uptr BatchGroupEnd = BatchGroupBeg + GroupSize;
       const uptr AllocatedGroupSize = AllocatedUserEnd >= BatchGroupEnd
-                                          ? BatchGroupEnd - BatchGroupBeg
+                                          ? GroupSize
                                           : AllocatedUserEnd - BatchGroupBeg;
       const uptr BatchGroupUsedEnd = BatchGroupBeg + AllocatedGroupSize;
       const bool BlockAlignedWithUsedEnd =
@@ -945,7 +932,7 @@
       } else {
         for (const auto &It : BG.Batches)
           for (u16 I = 0; I < It.getCount(); ++I)
-            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.GroupId);
+            DCHECK_EQ(compactPtrGroup(It.get(I)), BG.CompactPtrGroupBeg);
 
         Context.markRangeAsAllCounted(BatchGroupBeg, BatchGroupUsedEnd,
                                       Region->RegionBeg);
       }
@@ -974,9 +961,10 @@
         break;
       }
 
-      DCHECK_NE(BG->GroupId, GroupToRelease.front()->GroupId);
+      DCHECK_NE(BG->CompactPtrGroupBeg,
+                GroupToRelease.front()->CompactPtrGroupBeg);
 
-      if (BG->GroupId < GroupToRelease.front()->GroupId)
+      if (BG->CompactPtrGroupBeg < GroupToRelease.front()->CompactPtrGroupBeg)
         continue;
 
       BatchGroup *Cur = GroupToRelease.front();
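
Note: the standalone sketch below is illustrative only and is not part of the patch. It mirrors the new compactPtrGroup()/decompactGroupBase() helpers with hypothetical example parameters (GroupSizeLog = 18, CompactPtrScale = 4) to show why the group key can change from a shifted group id to the compact begin address: clearing the low GroupScale bits preserves the ordering the sorted FreeList relies on, and the decompacted group begin stays GroupSize-aligned relative to the compact-pointer base, which is what the added DCHECKs and the removal of the Max(..., Region->RegionBeg) clamping depend on.

// Illustrative sketch (not part of the patch): the helpers below mirror the
// new grouping scheme with hypothetical example parameters.
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr CompactPtrScale = 4;  // example value for PrimaryCompactPtrScale
constexpr uptr GroupSizeLog = 18;    // example value for PrimaryGroupSizeLog
constexpr uptr GroupScale = GroupSizeLog - CompactPtrScale;
constexpr uptr GroupSize = static_cast<uptr>(1) << GroupSizeLog;

// The group key is now the compact begin address of the group: the compact
// pointer with its low GroupScale bits cleared (instead of the shifted id).
uptr compactPtrGroup(uptr CompactPtr) {
  const uptr Mask = (static_cast<uptr>(1) << GroupScale) - 1;
  return CompactPtr & ~Mask;
}

// Decompaction only needs to add the compact-pointer base back. No clamping
// against RegionBeg is required because a group can no longer start below it.
uptr decompactGroupBase(uptr Base, uptr CompactPtrGroupBeg) {
  return Base + (CompactPtrGroupBeg << CompactPtrScale);
}

int main() {
  const uptr RegionBeg = GroupSize * 4;  // hypothetical GroupSize-aligned base
  const uptr CompactPtr = 0x12345;       // hypothetical compact pointer
  const uptr GroupBeg = compactPtrGroup(CompactPtr);

  // The decompacted group begin keeps the GroupSize alignment of the base,
  // which is what the DCHECK on (RegionBeg - BatchGroupBeg) % GroupSize
  // verifies in the patch.
  assert(decompactGroupBase(RegionBeg, GroupBeg) % GroupSize == 0);

  // Ordering by compact group begin matches ordering by the old group id, so
  // the sorted traversal of the FreeList is unaffected by the change of key.
  assert((GroupBeg >> GroupScale) == (CompactPtr >> GroupScale));
  return 0;
}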