diff --git a/compiler-rt/lib/scudo/standalone/chunk.h b/compiler-rt/lib/scudo/standalone/chunk.h
--- a/compiler-rt/lib/scudo/standalone/chunk.h
+++ b/compiler-rt/lib/scudo/standalone/chunk.h
@@ -85,7 +85,7 @@
 constexpr uptr ChecksumMask = (1UL << 16) - 1;

 constexpr uptr getHeaderSize() {
-  return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+  return roundUp(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
 }

 inline AtomicPackedHeader *getAtomicHeader(void *Ptr) {
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -342,7 +342,7 @@
   // to be sure that there will be an address in the block that will satisfy
   // the alignment.
   const uptr NeededSize =
-      roundUpTo(Size, MinAlignment) +
+      roundUp(Size, MinAlignment) +
       ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());

   // Takes care of extravagantly large sizes as well as integer overflows.
@@ -402,7 +402,7 @@
     const uptr BlockUptr = reinterpret_cast<uptr>(Block);
     const uptr UnalignedUserPtr = BlockUptr + Chunk::getHeaderSize();
-    const uptr UserPtr = roundUpTo(UnalignedUserPtr, Alignment);
+    const uptr UserPtr = roundUp(UnalignedUserPtr, Alignment);

     void *Ptr = reinterpret_cast<void *>(UserPtr);
     void *TaggedPtr = Ptr;
@@ -461,7 +461,7 @@
             PrevUserPtr == UserPtr &&
             (TaggedUserPtr = loadTag(UserPtr)) != UserPtr) {
           uptr PrevEnd = TaggedUserPtr + Header.SizeOrUnusedBytes;
-          const uptr NextPage = roundUpTo(TaggedUserPtr, getPageSizeCached());
+          const uptr NextPage = roundUp(TaggedUserPtr, getPageSizeCached());
           if (NextPage < PrevEnd && loadTag(NextPage) != NextPage)
             PrevEnd = NextPage;
           TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
@@ -474,8 +474,8 @@
           // was freed, it would not have been retagged and thus zeroed, and
           // therefore it needs to be zeroed now.
           memset(TaggedPtr, 0,
-                 Min(Size, roundUpTo(PrevEnd - TaggedUserPtr,
-                                     archMemoryTagGranuleSize())));
+                 Min(Size, roundUp(PrevEnd - TaggedUserPtr,
+                                   archMemoryTagGranuleSize())));
         } else if (Size) {
           // Clear any stack metadata that may have previously been stored in
           // the chunk data.
@@ -1241,15 +1241,15 @@
   void resizeTaggedChunk(uptr OldPtr, uptr NewPtr, uptr NewSize,
                          uptr BlockEnd) {
-    uptr RoundOldPtr = roundUpTo(OldPtr, archMemoryTagGranuleSize());
+    uptr RoundOldPtr = roundUp(OldPtr, archMemoryTagGranuleSize());
     uptr RoundNewPtr;
     if (RoundOldPtr >= NewPtr) {
       // If the allocation is shrinking we just need to set the tag past the end
       // of the allocation to 0. See explanation in storeEndMarker() above.
-      RoundNewPtr = roundUpTo(NewPtr, archMemoryTagGranuleSize());
+      RoundNewPtr = roundUp(NewPtr, archMemoryTagGranuleSize());
     } else {
       // Set the memory tag of the region
-      // [RoundOldPtr, roundUpTo(NewPtr, archMemoryTagGranuleSize()))
+      // [RoundOldPtr, roundUp(NewPtr, archMemoryTagGranuleSize()))
       // to the pointer tag stored in OldPtr.
       RoundNewPtr = storeTags(RoundOldPtr, NewPtr);
     }
@@ -1505,7 +1505,8 @@
     MapPlatformData Data = {};
     RawRingBuffer = static_cast<char *>(
         map(/*Addr=*/nullptr,
-            roundUpTo(ringBufferSizeInBytes(AllocationRingBufferSize), getPageSizeCached()),
+            roundUp(ringBufferSizeInBytes(AllocationRingBufferSize),
+                    getPageSizeCached()),
             "AllocatorRingBuffer", /*Flags=*/0, &Data));
     auto *RingBuffer = reinterpret_cast<AllocationRingBuffer *>(RawRingBuffer);
     RingBuffer->Size = AllocationRingBufferSize;
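The `NeededSize` and `UserPtr` computations in the combined.h hunks above carry the allocator's core alignment invariant: reserving `max(Alignment, HeaderSize)` extra bytes guarantees that an `Alignment`-aligned user pointer, with a header in front of it, always fits inside the block. A minimal standalone sketch of that invariant, assuming illustrative values for `Chunk::getHeaderSize()` and the minimum alignment; this is not Scudo code:

```cpp
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  constexpr uptr MinAlignment = 16; // assumed 1 << SCUDO_MIN_ALIGNMENT_LOG
  constexpr uptr HeaderSize = 16;   // assumed Chunk::getHeaderSize()
  for (uptr Alignment = MinAlignment; Alignment <= 4096; Alignment *= 2) {
    for (uptr Size = 1; Size <= 512; Size += 7) {
      const uptr NeededSize =
          roundUp(Size, MinAlignment) +
          ((Alignment > MinAlignment) ? Alignment : HeaderSize);
      // Pretend the primary returned a block at an address that is only
      // MinAlignment-aligned: the worst case for alignment padding.
      const uptr BlockUptr = 0x100000 + MinAlignment;
      const uptr UserPtr = roundUp(BlockUptr + HeaderSize, Alignment);
      // The header fits in front of the aligned user pointer, and the user
      // size fits behind it, within the NeededSize bytes that were reserved.
      assert(UserPtr >= BlockUptr + HeaderSize);
      assert(UserPtr + Size <= BlockUptr + NeededSize);
    }
  }
}
```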
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -27,17 +27,31 @@
   return D;
 }

-inline constexpr uptr roundUpTo(uptr X, uptr Boundary) {
+inline constexpr bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
+
+inline constexpr uptr roundUp(uptr X, uptr Boundary) {
+  DCHECK(isPowerOfTwo(Boundary));
   return (X + Boundary - 1) & ~(Boundary - 1);
 }
+inline constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
+  return ((X + Boundary - 1) / Boundary) * Boundary;
+}

-inline constexpr uptr roundDownTo(uptr X, uptr Boundary) {
+inline constexpr uptr roundDown(uptr X, uptr Boundary) {
+  DCHECK(isPowerOfTwo(Boundary));
   return X & ~(Boundary - 1);
 }
+inline constexpr uptr roundDownSlow(uptr X, uptr Boundary) {
+  return (X / Boundary) * Boundary;
+}

 inline constexpr bool isAligned(uptr X, uptr Alignment) {
+  DCHECK(isPowerOfTwo(Alignment));
   return (X & (Alignment - 1)) == 0;
 }
+inline constexpr bool isAlignedSlow(uptr X, uptr Alignment) {
+  return X % Alignment == 0;
+}

 template <class T> constexpr T Min(T A, T B) { return A < B ? A : B; }
@@ -49,14 +63,12 @@
   B = Tmp;
 }

-inline bool isPowerOfTwo(uptr X) { return (X & (X - 1)) == 0; }
-
 inline uptr getMostSignificantSetBitIndex(uptr X) {
   DCHECK_NE(X, 0U);
   return SCUDO_WORDSIZE - 1U - static_cast<uptr>(__builtin_clzl(X));
 }

-inline uptr roundUpToPowerOfTwo(uptr Size) {
+inline uptr roundUpPowerOfTwo(uptr Size) {
   DCHECK(Size);
   if (isPowerOfTwo(Size))
     return Size;
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -341,7 +341,7 @@
     else
       MapSize = RegionSize;
   } else {
-    Region = roundUpTo(MapBase, RegionSize);
+    Region = roundUp(MapBase, RegionSize);
     unmap(reinterpret_cast<void *>(MapBase), Region - MapBase);
     MapSize = RegionSize;
   }
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -55,7 +55,7 @@
   static uptr getSizeByClassId(uptr ClassId) {
     return (ClassId == SizeClassMap::BatchClassId)
-               ? roundUpTo(sizeof(TransferBatch), 1U << CompactPtrScale)
+               ? roundUp(sizeof(TransferBatch), 1U << CompactPtrScale)
                : SizeClassMap::getSizeByClassId(ClassId);
   }
@@ -638,7 +638,7 @@
     if (TotalUserBytes > MappedUser) {
       // Do the mmap for the user memory.
       const uptr MapSize =
-          roundUpTo(TotalUserBytes - MappedUser, MapSizeIncrement);
+          roundUp(TotalUserBytes - MappedUser, MapSizeIncrement);
       const uptr RegionBase = RegionBeg - getRegionBaseByClassId(ClassId);
       if (UNLIKELY(RegionBase + MappedUser + MapSize > RegionSize)) {
         Region->Exhausted = true;
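The common.h hunk above is the heart of the change: the renamed `roundUp`/`roundDown`/`isAligned` keep the branch-free mask trick and now `DCHECK` that the boundary is a power of two, while the new `*Slow` variants handle arbitrary boundaries via division. A standalone sketch of the distinction (the `main` and values are illustrative, not Scudo code):

```cpp
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr bool isPowerOfTwo(uptr X) { return X != 0 && (X & (X - 1)) == 0; }

// Fast path: when Boundary is a power of two, Boundary - 1 is an all-ones
// mask, so one add and one and-not replace a divide and a multiply.
constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

// Slow path: correct for any non-zero Boundary, at the cost of a division.
constexpr uptr roundUpSlow(uptr X, uptr Boundary) {
  return ((X + Boundary - 1) / Boundary) * Boundary;
}

int main() {
  // The two agree whenever the boundary is a power of two...
  for (uptr B = 1; B <= 4096; B *= 2)
    for (uptr X = 0; X < 1000; ++X)
      assert(roundUp(X, B) == roundUpSlow(X, B));
  // ...but only roundUpSlow is meaningful otherwise, e.g. Boundary == 24:
  assert(!isPowerOfTwo(24));
  assert(roundUpSlow(100, 24) == 120);
  assert(roundUp(100, 24) == 104); // mask trick silently misrounds
}
```

This is why the renamed fast variants gained the `DCHECK(isPowerOfTwo(...))`: a non-power-of-two boundary does not fail loudly, it just produces a wrong answer.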
diff --git a/compiler-rt/lib/scudo/standalone/release.h b/compiler-rt/lib/scudo/standalone/release.h
--- a/compiler-rt/lib/scudo/standalone/release.h
+++ b/compiler-rt/lib/scudo/standalone/release.h
@@ -73,7 +73,7 @@
       Mutex.unlock();
     else
       unmap(reinterpret_cast<void *>(Buffer),
-            roundUpTo(BufferSize, getPageSizeCached()));
+            roundUp(BufferSize, getPageSizeCached()));
     Buffer = nullptr;
   }
@@ -94,7 +94,7 @@
     // Rounding counter storage size up to the power of two allows for using
     // bit shifts calculating particular counter's Index and offset.
     const uptr CounterSizeBits =
-        roundUpToPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
+        roundUpPowerOfTwo(getMostSignificantSetBitIndex(MaxValue) + 1);
     DCHECK_LE(CounterSizeBits, MaxCounterBits);
     CounterSizeBitsLog = getLog2(CounterSizeBits);
     CounterMask = ~(static_cast<uptr>(0)) >> (MaxCounterBits - CounterSizeBits);
@@ -105,7 +105,7 @@
     BitOffsetMask = PackingRatio - 1;

     SizePerRegion =
-        roundUpTo(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
+        roundUp(NumCounters, static_cast<uptr>(1U) << PackingRatioLog) >>
         PackingRatioLog;
     BufferSize = SizePerRegion * sizeof(*Buffer) * Regions;
     if (BufferSize <= (StaticBufferCount * sizeof(Buffer[0])) &&
@@ -120,7 +120,7 @@
       const uptr MmapFlags =
           MAP_ALLOWNOMEM | (SCUDO_FUCHSIA ? MAP_PRECOMMIT : 0);
       Buffer = reinterpret_cast<uptr *>(
-          map(nullptr, roundUpTo(BufferSize, getPageSizeCached()),
+          map(nullptr, roundUp(BufferSize, getPageSizeCached()),
              "scudo:counters", MmapFlags, &MapData));
     }
   }
@@ -266,7 +266,7 @@
       }
     }

-    PagesCount = roundUpTo(RegionSize, PageSize) / PageSize;
+    PagesCount = roundUp(RegionSize, PageSize) / PageSize;
     PageSizeLog = getLog2(PageSize);
     RoundedRegionSize = PagesCount << PageSizeLog;
     RoundedSize = NumberOfRegions * RoundedRegionSize;
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -244,7 +244,7 @@
         continue;
       const uptr CommitSize = Entries[I].CommitSize;
       const uptr AllocPos =
-          roundDownTo(CommitBase + CommitSize - Size, Alignment);
+          roundDown(CommitBase + CommitSize - Size, Alignment);
       HeaderPos =
           AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
       if (HeaderPos > CommitBase + CommitSize)
@@ -510,9 +510,9 @@
   Alignment = Max(Alignment, uptr(1U) << SCUDO_MIN_ALIGNMENT_LOG);
   const uptr PageSize = getPageSizeCached();
   uptr RoundedSize =
-      roundUpTo(roundUpTo(Size, Alignment) + LargeBlock::getHeaderSize() +
-                    Chunk::getHeaderSize(),
-                PageSize);
+      roundUp(roundUp(Size, Alignment) + LargeBlock::getHeaderSize() +
+                  Chunk::getHeaderSize(),
+              PageSize);
   if (Alignment > PageSize)
     RoundedSize += Alignment - PageSize;
@@ -559,7 +559,7 @@
     // For alignments greater than or equal to a page, the user pointer (eg: the
     // pointer that is returned by the C or C++ allocation APIs) ends up on a
    // page boundary , and our headers will live in the preceding page.
-    CommitBase = roundUpTo(MapBase + PageSize + 1, Alignment) - PageSize;
+    CommitBase = roundUp(MapBase + PageSize + 1, Alignment) - PageSize;
     const uptr NewMapBase = CommitBase - PageSize;
     DCHECK_GE(NewMapBase, MapBase);
     // We only trim the extra memory on 32-bit platforms: 64-bit platforms
@@ -569,7 +569,7 @@
       MapBase = NewMapBase;
     }
     const uptr NewMapEnd =
-        CommitBase + PageSize + roundUpTo(Size, PageSize) + PageSize;
+        CommitBase + PageSize + roundUp(Size, PageSize) + PageSize;
     DCHECK_LE(NewMapEnd, MapEnd);
     if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
       unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0, &Data);
@@ -578,7 +578,7 @@
   }

   const uptr CommitSize = MapEnd - PageSize - CommitBase;
-  const uptr AllocPos = roundDownTo(CommitBase + CommitSize - Size, Alignment);
+  const uptr AllocPos = roundDown(CommitBase + CommitSize - Size, Alignment);
   mapSecondary(Options, CommitBase, CommitSize, AllocPos, 0, &Data);
   const uptr HeaderPos =
       AllocPos - Chunk::getHeaderSize() - LargeBlock::getHeaderSize();
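The release.h comment above notes that rounding the per-counter width up to a power of two lets a counter's word index and bit offset be computed with shifts and masks alone. A self-contained sketch of that packing scheme, with illustrative field names rather than Scudo's actual `RegionPageMap` internals:

```cpp
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

struct PackedCounters {
  uptr CounterSizeBitsLog; // log2 of the (power-of-two) counter width
  uptr PackingRatioLog;    // log2(64 >> CounterSizeBitsLog)
  uptr BitOffsetMask;      // (1 << PackingRatioLog) - 1
  uptr CounterMask;        // low CounterSizeBits bits set
  uint64_t Words[64] = {};

  uptr get(uptr I) const {
    const uptr Index = I >> PackingRatioLog; // which 64-bit word
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    return (Words[Index] >> BitOffset) & CounterMask;
  }
  void inc(uptr I) {
    const uptr Index = I >> PackingRatioLog;
    const uptr BitOffset = (I & BitOffsetMask) << CounterSizeBitsLog;
    Words[Index] += uint64_t(1) << BitOffset;
  }
};

int main() {
  // 4-bit counters: 16 counters packed per 64-bit word.
  PackedCounters C{/*CounterSizeBitsLog=*/2, /*PackingRatioLog=*/4,
                   /*BitOffsetMask=*/15, /*CounterMask=*/15};
  for (int N = 0; N < 5; ++N)
    C.inc(37);
  assert(C.get(37) == 5);
  assert(C.get(36) == 0);
}
```

If the counter width were not a power of two, the divisions hiding behind these shifts could not be folded away, which is exactly what `roundUpPowerOfTwo` buys.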
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -39,7 +39,7 @@
   if (Alignment < MinAlignment)
     Alignment = MinAlignment;
   const scudo::uptr NeededSize =
-      scudo::roundUpTo(Size, MinAlignment) +
+      scudo::roundUp(Size, MinAlignment) +
       ((Alignment > MinAlignment) ? Alignment : scudo::Chunk::getHeaderSize());
   return AllocatorT::PrimaryT::canAllocate(NeededSize);
 }
@@ -48,7 +48,7 @@
 void checkMemoryTaggingMaybe(AllocatorT *Allocator, void *P, scudo::uptr Size,
                              scudo::uptr Alignment) {
   const scudo::uptr MinAlignment = 1UL << SCUDO_MIN_ALIGNMENT_LOG;
-  Size = scudo::roundUpTo(Size, MinAlignment);
+  Size = scudo::roundUp(Size, MinAlignment);
   if (Allocator->useMemoryTaggingTestOnly())
     EXPECT_DEATH(
         {
diff --git a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/memtag_test.cpp
@@ -163,7 +163,7 @@
   uptr TaggedBegin = addFixedTag(NoTagBegin, Tag);
   uptr TaggedEnd = addFixedTag(NoTagEnd, Tag);

-  EXPECT_EQ(roundUpTo(TaggedEnd, archMemoryTagGranuleSize()),
+  EXPECT_EQ(roundUp(TaggedEnd, archMemoryTagGranuleSize()),
             storeTags(TaggedBegin, TaggedEnd));

   uptr LoadPtr = Addr;
diff --git a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/release_test.cpp
@@ -29,7 +29,7 @@
     // Verify the packing ratio, the counter is Expected to be packed into the
     // closest power of 2 bits.
     scudo::RegionPageMap PageMap(1U, SCUDO_WORDSIZE, 1UL << I);
-    EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpToPowerOfTwo(I + 1),
+    EXPECT_EQ(sizeof(scudo::uptr) * scudo::roundUpPowerOfTwo(I + 1),
              PageMap.getBufferSize());
   }
@@ -238,7 +238,7 @@
         InFreeRange = false;
         // Verify that all entire memory pages covered by this range of free
        // chunks were released.
-        scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+        scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
         while (P + PageSize <= CurrentBlock) {
           const bool PageReleased =
               Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
@@ -254,9 +254,9 @@
   }

   if (InFreeRange) {
-    scudo::uptr P = scudo::roundUpTo(CurrentFreeRangeStart, PageSize);
+    scudo::uptr P = scudo::roundUp(CurrentFreeRangeStart, PageSize);
     const scudo::uptr EndPage =
-        scudo::roundUpTo(MaxBlocks * BlockSize, PageSize);
+        scudo::roundUp(MaxBlocks * BlockSize, PageSize);
     while (P + PageSize <= EndPage) {
       const bool PageReleased =
           Recorder.ReportedPages.find(P) != Recorder.ReportedPages.end();
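The packing-ratio test above expects the page-map buffer to grow by `roundUpPowerOfTwo(I + 1)` words. For reference, a standalone equivalent of that helper, using `__builtin_clzll` directly instead of Scudo's `getMostSignificantSetBitIndex` (a sketch under those assumptions, not the library's code):

```cpp
#include <cassert>
#include <cstdint>

using uptr = uint64_t;

bool isPowerOfTwo(uptr X) { return X != 0 && (X & (X - 1)) == 0; }

uptr roundUpPowerOfTwo(uptr Size) {
  assert(Size != 0);
  if (isPowerOfTwo(Size))
    return Size;
  // Index of the most significant set bit; the next power of two is one
  // position above it. Overflows for Size > 2^63, as does Scudo's version.
  const uptr MostSignificantBit = 63 - __builtin_clzll(Size);
  return uptr(1) << (MostSignificantBit + 1);
}

int main() {
  assert(roundUpPowerOfTwo(1) == 1);
  assert(roundUpPowerOfTwo(3) == 4);
  assert(roundUpPowerOfTwo(5) == 8);
  assert(roundUpPowerOfTwo(33) == 64);
}
```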
diff --git a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/secondary_test.cpp
@@ -64,7 +64,7 @@
     P = L->allocate(Options, Size + Align, Align);
     EXPECT_NE(P, nullptr);
     void *AlignedP = reinterpret_cast<void *>(
-        scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+        scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
     memset(AlignedP, 'A', Size);
     L->deallocate(Options, P);
@@ -122,7 +122,7 @@
 // combined allocator.
 TEST_F(MapAllocatorTest, SecondaryCombinations) {
   constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
-  constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
+  constexpr scudo::uptr HeaderSize = scudo::roundUp(8, MinAlign);
   for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
     for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
          AlignLog++) {
@@ -131,13 +131,13 @@
         if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
           continue;
         const scudo::uptr UserSize =
-            scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
+            scudo::roundUp((1U << SizeLog) + Delta, MinAlign);
         const scudo::uptr Size =
             HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
         void *P = Allocator->allocate(Options, Size, Align);
         EXPECT_NE(P, nullptr);
         void *AlignedP = reinterpret_cast<void *>(
-            scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+            scudo::roundUp(reinterpret_cast<scudo::uptr>(P), Align));
         memset(AlignedP, 0xff, UserSize);
         Allocator->deallocate(Options, P);
       }
diff --git a/compiler-rt/lib/scudo/standalone/trusty.cpp b/compiler-rt/lib/scudo/standalone/trusty.cpp
--- a/compiler-rt/lib/scudo/standalone/trusty.cpp
+++ b/compiler-rt/lib/scudo/standalone/trusty.cpp
@@ -37,7 +37,7 @@
   uptr Start;
   uptr End;

-  Start = roundUpTo(ProgramBreak, SBRK_ALIGN);
+  Start = roundUp(ProgramBreak, SBRK_ALIGN);
   // Don't actually extend the heap if MAP_NOACCESS flag is set since this is
   // the case where Scudo tries to reserve a memory region without mapping
   // physical pages.
@@ -45,7 +45,7 @@
     return reinterpret_cast<void *>(Start);

   // Attempt to extend the heap by Size bytes using _trusty_brk.
-  End = roundUpTo(Start + Size, SBRK_ALIGN);
+  End = roundUp(Start + Size, SBRK_ALIGN);
   ProgramBreak =
       reinterpret_cast<uptr>(_trusty_brk(reinterpret_cast<void *>(End)));
   if (ProgramBreak < End) {
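trusty.cpp rounds both the start of the returned region and the requested break to `SBRK_ALIGN`, so `_trusty_brk` only ever sees aligned break values. A toy model of that growth pattern, with a fake break routine standing in for the real syscall and an assumed alignment value (a sketch, not Trusty code):

```cpp
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr SBRK_ALIGN = 32; // illustrative; the real value may differ

constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

static uptr FakeBreak = 0x20000005; // deliberately unaligned current break

// Hypothetical stand-in for _trusty_brk: moves the break and returns it.
uptr fakeTrustyBrk(uptr Requested) { return FakeBreak = Requested; }

void *extendHeap(uptr Size) {
  const uptr Start = roundUp(FakeBreak, SBRK_ALIGN);
  const uptr End = roundUp(Start + Size, SBRK_ALIGN);
  if (fakeTrustyBrk(End) < End)
    return nullptr; // the break did not move far enough: allocation failed
  return reinterpret_cast<void *>(Start);
}

int main() {
  void *P = extendHeap(100);
  assert(P != nullptr);
  assert(reinterpret_cast<uptr>(P) % SBRK_ALIGN == 0);
  assert(FakeBreak % SBRK_ALIGN == 0);
}
```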
diff --git a/compiler-rt/lib/scudo/standalone/vector.h b/compiler-rt/lib/scudo/standalone/vector.h
--- a/compiler-rt/lib/scudo/standalone/vector.h
+++ b/compiler-rt/lib/scudo/standalone/vector.h
@@ -40,7 +40,7 @@
   void push_back(const T &Element) {
     DCHECK_LE(Size, capacity());
     if (Size == capacity()) {
-      const uptr NewCapacity = roundUpToPowerOfTwo(Size + 1);
+      const uptr NewCapacity = roundUpPowerOfTwo(Size + 1);
       reallocate(NewCapacity);
     }
     memcpy(&Data[Size++], &Element, sizeof(T));
@@ -82,7 +82,7 @@
   void reallocate(uptr NewCapacity) {
     DCHECK_GT(NewCapacity, 0);
     DCHECK_LE(Size, NewCapacity);
-    NewCapacity = roundUpTo(NewCapacity * sizeof(T), getPageSizeCached());
+    NewCapacity = roundUp(NewCapacity * sizeof(T), getPageSizeCached());
     T *NewData = reinterpret_cast<T *>(
         map(nullptr, NewCapacity, "scudo:vector", 0, &MapData));
     memcpy(NewData, Data, Size * sizeof(T));
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -91,7 +91,7 @@
       alignment = 1U;
     } else {
       if (UNLIKELY(!scudo::isPowerOfTwo(alignment)))
-        alignment = scudo::roundUpToPowerOfTwo(alignment);
+        alignment = scudo::roundUpPowerOfTwo(alignment);
     }
   } else {
     if (UNLIKELY(!scudo::isPowerOfTwo(alignment))) {
@@ -131,9 +131,9 @@
       scudo::reportPvallocOverflow(size);
   }
   // pvalloc(0) should allocate one page.
-  return scudo::setErrnoOnNull(SCUDO_ALLOCATOR.allocate(
-      size ? scudo::roundUpTo(size, PageSize) : PageSize,
-      scudo::Chunk::Origin::Memalign, PageSize));
+  return scudo::setErrnoOnNull(
+      SCUDO_ALLOCATOR.allocate(size ? scudo::roundUp(size, PageSize) : PageSize,
+                               scudo::Chunk::Origin::Memalign, PageSize));
 }

 INTERFACE WEAK void *SCUDO_PREFIX(realloc)(void *ptr, size_t size) {
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_checks.h
@@ -64,7 +64,7 @@
 // Returns true if the size passed to pvalloc overflows when rounded to the next
 // multiple of PageSize.
 inline bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
-  return roundUpTo(Size, PageSize) < Size;
+  return roundUp(Size, PageSize) < Size;
 }

 } // namespace scudo
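Finally, `checkForPvallocOverflow` relies on unsigned wraparound: if `Size` is within a page of `UINTPTR_MAX`, rounding up wraps past zero, and the result compares less than `Size`. A small standalone demonstration (not part of the patch):

```cpp
#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr roundUp(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

constexpr bool checkForPvallocOverflow(uptr Size, uptr PageSize) {
  return roundUp(Size, PageSize) < Size;
}

int main() {
  constexpr uptr PageSize = 4096;
  // Ordinary sizes round up without wrapping.
  assert(!checkForPvallocOverflow(1, PageSize));
  assert(!checkForPvallocOverflow(PageSize + 1, PageSize));
  // Sizes within a page of UINTPTR_MAX wrap to 0 and are rejected.
  assert(checkForPvallocOverflow(UINTPTR_MAX, PageSize));
  assert(checkForPvallocOverflow(UINTPTR_MAX - PageSize + 2, PageSize));
}
```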