Index: lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- lib/sanitizer_common/sanitizer_fuchsia.cc
+++ lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -266,6 +266,10 @@
   CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
   CHECK_LE(size, size_);
   UnmapOrDie(reinterpret_cast<void *>(addr), size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(addr + size);
+  }
+  size_ = size_ - size;
 }
 
 // MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
Index: lib/sanitizer_common/sanitizer_posix_libcdep.cc
===================================================================
--- lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -370,6 +370,10 @@
   CHECK((addr_as_void == base_) || (addr + size == base_as_uptr + size_));
   CHECK_LE(size, size_);
   UnmapOrDie(reinterpret_cast<void *>(addr), size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(addr + size);
+  }
+  size_ = size_ - size;
 }
 
 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
Index: lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- lib/sanitizer_common/sanitizer_win.cc
+++ lib/sanitizer_common/sanitizer_win.cc
@@ -255,6 +255,10 @@
   // Only unmap if it covers the entire range.
   CHECK((addr == base_as_uptr) && (size == size_));
   UnmapOrDie(addr_as_void, size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(addr + size);
+  }
+  size_ = size_ - size;
 }
 
 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
Index: lib/scudo/scudo_allocator_secondary.h
===================================================================
--- lib/scudo/scudo_allocator_secondary.h
+++ lib/scudo/scudo_allocator_secondary.h
@@ -31,14 +31,15 @@
     uptr UserSize = Size - AlignedChunkHeaderSize;
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
-    uptr MapSize = Size + SecondaryHeaderSize;
+    uptr MapSize = Size + AlignedReservedAddressRangeSize;
     if (Alignment > MinAlignment)
       MapSize += Alignment;
     MapSize = RoundUpTo(MapSize, PageSize);
     // Account for 2 guard pages, one before and one after the chunk.
     MapSize += 2 * PageSize;
 
-    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
+    ReservedAddressRange AddressRange;
+    uptr MapBeg = AddressRange.Init(MapSize);
     if (MapBeg == ~static_cast<uptr>(0))
       return ReturnNullOrDieOnFailure::OnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
@@ -61,27 +62,27 @@
             PageSize;
         CHECK_GE(NewMapBeg, MapBeg);
         if (NewMapBeg != MapBeg) {
-          UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+          AddressRange.Unmap(MapBeg, NewMapBeg - MapBeg);
           MapBeg = NewMapBeg;
         }
         UserEnd = UserBeg + UserSize;
       }
       uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
       if (NewMapEnd != MapEnd) {
-        UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+        AddressRange.Unmap(NewMapEnd, MapEnd - NewMapEnd);
         MapEnd = NewMapEnd;
       }
       MapSize = MapEnd - MapBeg;
     }
 
     CHECK_LE(UserEnd, MapEnd - PageSize);
-    // Actually mmap the memory, preserving the guard pages on either side.
-    CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
-        MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
+    // Actually mmap the memory, preserving the guard pages on either side.
+    CHECK_EQ(MapBeg + PageSize,
+             AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
     uptr Ptr = UserBeg - AlignedChunkHeaderSize;
-    SecondaryHeader *Header = getHeader(Ptr);
-    Header->MapBeg = MapBeg;
-    Header->MapSize = MapSize;
+    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
+    *StoredRange = AddressRange;
+
     // The primary adds the whole class size to the stats when allocating a
     // chunk, so we will do something similar here. But we will not account for
     // the guard pages.
@@ -95,41 +96,41 @@
   }
 
   void Deallocate(AllocatorStats *Stats, void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
+    // Since we're unmapping the entirety of where the ReservedAddressRange
+    // actually is, copy onto the stack.
+    ReservedAddressRange AddressRange = *getReservedAddressRange(Ptr);
     {
       SpinMutexLock l(&StatsMutex);
-      Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
-      Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+      Stats->Sub(AllocatorStatAllocated, AddressRange.size() - 2 * PageSize);
+      Stats->Sub(AllocatorStatMapped, AddressRange.size() - 2 * PageSize);
     }
-    UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
+    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
+                       AddressRange.size());
   }
 
   uptr GetActuallyAllocatedSize(void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
-    // Deduct PageSize as MapSize includes the trailing guard page.
-    uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
+    ReservedAddressRange *StoredRange = getReservedAddressRange(Ptr);
+    // Deduct PageSize as ReservedAddressRange size includes the trailing guard
+    // page.
+    uptr MapEnd = reinterpret_cast<uptr>(StoredRange->base()) +
+        StoredRange->size() - PageSize;
     return MapEnd - reinterpret_cast<uptr>(Ptr);
   }
 
  private:
-  // A Secondary allocated chunk header contains the base of the mapping and
-  // its size, which comprises the guard pages.
-  struct SecondaryHeader {
-    uptr MapBeg;
-    uptr MapSize;
-  };
-  // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
-  COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
-
-  SecondaryHeader *getHeader(uptr Ptr) {
-    return reinterpret_cast<SecondaryHeader *>(Ptr - sizeof(SecondaryHeader));
-  }
-  SecondaryHeader *getHeader(const void *Ptr) {
-    return getHeader(reinterpret_cast<uptr>(Ptr));
+  ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
+    return reinterpret_cast<ReservedAddressRange *>(
+        Ptr - sizeof(ReservedAddressRange));
+  }
+  ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
+    return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
   }
 
-  const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
-  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
+  const uptr AlignedReservedAddressRangeSize =
+      RoundUpTo(sizeof(ReservedAddressRange), MinAlignment);
+  const uptr HeadersSize =
+      AlignedReservedAddressRangeSize + AlignedChunkHeaderSize;
+
   uptr PageSize;
   SpinMutex StatsMutex;
 };
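For context (not part of the diff above): the Secondary now follows the
reserve-then-commit pattern that ReservedAddressRange exposes. Init() reserves
inaccessible address space, Unmap() trims the reservation from either end,
Map() commits the usable portion, and base()/size() reflect whatever remains so
Deallocate() can tear the whole range down later. Below is a minimal sketch of
that lifecycle, using only the calls visible in this patch and assuming it is
built inside the sanitizer runtime; the function name and sizes are
illustrative only.

  #include "sanitizer_common/sanitizer_common.h"

  using __sanitizer::uptr;
  using __sanitizer::ReservedAddressRange;

  // Illustrative only: reserve a range, commit all but two guard pages, then
  // release the entire reservation, mirroring the Allocate/Deallocate flow.
  static void ReserveCommitReleaseExample(uptr MapSize) {
    const uptr PageSize = __sanitizer::GetPageSizeCached();
    ReservedAddressRange AddressRange;
    // Reserve MapSize bytes of inaccessible address space.
    uptr MapBeg = AddressRange.Init(MapSize);
    if (MapBeg == ~static_cast<uptr>(0))
      return;  // Reservation failed.
    // Commit everything except the leading and trailing guard pages.
    uptr UserBeg = AddressRange.Map(MapBeg + PageSize, MapSize - 2 * PageSize);
    (void)UserBeg;  // The committed pages would be handed out to the user.
    // base()/size() track any trimming done via Unmap(), so the remaining
    // range can be released in one call.
    AddressRange.Unmap(reinterpret_cast<uptr>(AddressRange.base()),
                       AddressRange.size());
  }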