Index: compiler-rt/trunk/lib/scudo/scudo_allocator.h
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.h
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.h
@@ -59,9 +59,17 @@
 const uptr MinAlignment = 1 << MinAlignmentLog;
 const uptr MaxAlignment = 1 << MaxAlignmentLog;
 
-const uptr ChunkHeaderSize = sizeof(PackedHeader);
-const uptr AlignedChunkHeaderSize =
-    (ChunkHeaderSize + MinAlignment - 1) & ~(MinAlignment - 1);
+// constexpr version of __sanitizer::RoundUp without the extraneous CHECK.
+// This way we can use it in constexpr variables and function declarations.
+constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
+  return (Size + Boundary - 1) & ~(Boundary - 1);
+}
+
+namespace Chunk {
+  constexpr uptr getHeaderSize() {
+    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
+  }
+}
 
 #if SANITIZER_CAN_USE_ALLOCATOR64
 const uptr AllocatorSpace = ~0ULL;
@@ -97,11 +105,6 @@
 typedef SizeClassAllocator32<AP32> PrimaryAllocator;
 #endif  // SANITIZER_CAN_USE_ALLOCATOR64
 
-// __sanitizer::RoundUp has a CHECK that is extraneous for us. Use our own.
-INLINE uptr RoundUpTo(uptr Size, uptr Boundary) {
-  return (Size + Boundary - 1) & ~(Boundary - 1);
-}
-
 #include "scudo_allocator_secondary.h"
 #include "scudo_allocator_combined.h"
 
Index: compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
+++ compiler-rt/trunk/lib/scudo/scudo_allocator.cpp
@@ -69,18 +69,17 @@
   // prevent this, we work with a local copy of the header.
   static INLINE void *getBackendPtr(const void *Ptr, UnpackedHeader *Header) {
     return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
-        AlignedChunkHeaderSize -
-        (Header->Offset << MinAlignmentLog));
+        getHeaderSize() - (Header->Offset << MinAlignmentLog));
   }
 
   static INLINE AtomicPackedHeader *getAtomicHeader(void *Ptr) {
     return reinterpret_cast<AtomicPackedHeader *>(reinterpret_cast<uptr>(Ptr) -
-        AlignedChunkHeaderSize);
+        getHeaderSize());
   }
 
   static INLINE const AtomicPackedHeader *getConstAtomicHeader(const void *Ptr) {
     return reinterpret_cast<const AtomicPackedHeader *>(
-        reinterpret_cast<uptr>(Ptr) - AlignedChunkHeaderSize);
+        reinterpret_cast<uptr>(Ptr) - getHeaderSize());
   }
 
   static INLINE bool isAligned(const void *Ptr) {
@@ -92,9 +91,8 @@
   static INLINE uptr getUsableSize(const void *Ptr, UnpackedHeader *Header) {
     const uptr Size = getBackendAllocator().getActuallyAllocatedSize(
         getBackendPtr(Ptr, Header), Header->ClassId);
-    if (Size == 0)
-      return 0;
-    return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
+    DCHECK_NE(Size, 0);
+    return Size - getHeaderSize() - (Header->Offset << MinAlignmentLog);
   }
 
   // Compute the checksum of the chunk pointer and its header.
@@ -251,7 +249,7 @@
   const uptr MaxPrimaryAlignment =
       1 << MostSignificantSetBitIndex(SizeClassMap::kMaxSize - MinAlignment);
   const uptr MaxOffset =
-      (MaxPrimaryAlignment - AlignedChunkHeaderSize) >> MinAlignmentLog;
+      (MaxPrimaryAlignment - Chunk::getHeaderSize()) >> MinAlignmentLog;
   Header.Offset = MaxOffset;
   if (Header.Offset != MaxOffset) {
     dieWithMessage("ERROR: the maximum possible offset doesn't fit in the "
@@ -368,9 +366,10 @@
     if (UNLIKELY(Size == 0))
       Size = 1;
 
-    uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
-    uptr AlignedSize = (Alignment > MinAlignment) ?
-        NeededSize + (Alignment - AlignedChunkHeaderSize) : NeededSize;
+    const uptr NeededSize = RoundUpTo(Size, MinAlignment) +
+        Chunk::getHeaderSize();
+    const uptr AlignedSize = (Alignment > MinAlignment) ?
+        NeededSize + (Alignment - Chunk::getHeaderSize()) : NeededSize;
     if (UNLIKELY(AlignedSize >= MaxAllowedMallocSize))
       return FailureHandler::OnBadRequest();
 
@@ -403,7 +402,7 @@
              BackendAllocator.getActuallyAllocatedSize(BackendPtr, ClassId));
 
     UnpackedHeader Header = {};
-    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + AlignedChunkHeaderSize;
+    uptr UserPtr = reinterpret_cast<uptr>(BackendPtr) + Chunk::getHeaderSize();
     if (UNLIKELY(!IsAligned(UserPtr, Alignment))) {
       // Since the Secondary takes care of alignment, a non-aligned pointer
       // means it is from the Primary. It is also the only case where the offset
@@ -505,7 +504,7 @@
         }
       }
     }
-    uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
+    const uptr Size = Header.ClassId ? Header.SizeOrUnusedBytes :
         Chunk::getUsableSize(Ptr, &Header) - Header.SizeOrUnusedBytes;
     if (DeleteSizeMismatch) {
       if (DeleteSize && DeleteSize != Size) {
Index: compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
===================================================================
--- compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
+++ compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h
@@ -28,7 +28,7 @@
   }
 
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
-    const uptr UserSize = Size - AlignedChunkHeaderSize;
+    const uptr UserSize = Size - Chunk::getHeaderSize();
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
     uptr MapSize = Size + AlignedReservedAddressRangeSize;
@@ -80,7 +80,7 @@
     // Actually mmap the memory, preserving the guard pages on either side
     CHECK_EQ(MapBeg + PageSize,
              AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
-    const uptr Ptr = UserBeg - AlignedChunkHeaderSize;
+    const uptr Ptr = UserBeg - Chunk::getHeaderSize();
     ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
     *StoredRange = AddressRange;
 
@@ -129,9 +129,9 @@
   }
 
   static constexpr uptr AlignedReservedAddressRangeSize =
-      (sizeof(ReservedAddressRange) + MinAlignment - 1) & ~(MinAlignment - 1);
+      RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
   static constexpr uptr HeadersSize =
-      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
+      AlignedReservedAddressRangeSize + Chunk::getHeaderSize();
 
   uptr PageSizeCached;
   SpinMutex StatsMutex;
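
Note (not part of the patch): the point of making RoundUpTo constexpr is that Chunk::getHeaderSize() can then participate in constant expressions, such as the static constexpr HeadersSize member in scudo_allocator_secondary.h; the previous INLINE helper could not. A minimal standalone sketch of that pattern follows, where PackedHeader and the alignment values are hypothetical stand-ins for the Scudo definitions:

#include <cstdint>

using uptr = uintptr_t;

// Hypothetical stand-ins for the Scudo types and constants.
typedef uint64_t PackedHeader;
constexpr uptr MinAlignmentLog = 4;
constexpr uptr MinAlignment = 1 << MinAlignmentLog;

// Same rounding as __sanitizer::RoundUpTo, minus the CHECK, so that it is
// usable in constant expressions. Boundary must be a power of two.
constexpr uptr RoundUpTo(uptr Size, uptr Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

namespace Chunk {
  constexpr uptr getHeaderSize() {
    return RoundUpTo(sizeof(PackedHeader), MinAlignment);
  }
}

// Because both functions are constexpr, they can initialize other constexpr
// variables (mirroring HeadersSize in the Secondary) and be verified at
// compile time.
constexpr uptr AlignedRangeSize = RoundUpTo(24, MinAlignment);  // placeholder
constexpr uptr HeadersSize = AlignedRangeSize + Chunk::getHeaderSize();
static_assert(HeadersSize % MinAlignment == 0,
              "headers must keep the user pointer MinAlignment-aligned");

int main() {
  return HeadersSize == 48 ? 0 : 1;  // 32 + 16 under this sketch's values
}

With a non-constexpr (INLINE) RoundUpTo, both constexpr initializers above would be ill-formed, which is why the patch promotes the helper to constexpr in the header and deletes the old definition.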