Index: compiler-rt/lib/sanitizer_common/sanitizer_common.h
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -84,11 +84,14 @@
 // Memory management
 // This class relies on zero-initialization.
-class ReservedAddressRange {
+// TODO(flowerhack) this is a hack; move MinAlignment into sanitizer_common.h
+class alignas(1 << FIRST_32_SECOND_64(3, 4)) ReservedAddressRange {
  public:
   uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
   uptr Map(uptr offset, uptr size, bool tolerate_enomem = false);
+  void Unmap(void *addr, uptr size);
   void *base() { return base_; }
+  uptr size() { return size_; }
 
  private:
 #pragma clang diagnostic push
 #pragma clang diagnostic ignored "-Wunused-private-field"
Index: compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_posix.cc
@@ -151,6 +151,16 @@
   return reinterpret_cast<uptr>(MmapFixedOrDie(fixed_addr, size));
 }
 
+// TODO(flowerhack): add sanity checks to make sure addr is actually
+// within address_range
+void ReservedAddressRange::Unmap(void *addr, uptr size) {
+  UnmapOrDie(addr, size);
+  if (addr == base_) {
+    base_ = reinterpret_cast<void *>(reinterpret_cast<uptr>(addr) + size);
+  }
+  size_ = size_ - size;
+}
+
 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
   return MmapFixedImpl(fixed_addr, size, true /*tolerate_enomem*/);
 }
Index: compiler-rt/lib/scudo/scudo_allocator_secondary.h
===================================================================
--- compiler-rt/lib/scudo/scudo_allocator_secondary.h
+++ compiler-rt/lib/scudo/scudo_allocator_secondary.h
@@ -29,17 +29,20 @@
   }
 
   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+    ReservedAddressRange address_range;
     uptr UserSize = Size - AlignedChunkHeaderSize;
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
-    uptr MapSize = Size + SecondaryHeaderSize;
+    uptr MapSize = Size + ReservedAddressRangeSize;
     if (Alignment > MinAlignment)
       MapSize += Alignment;
     MapSize = RoundUpTo(MapSize, PageSize);
     // Account for 2 guard pages, one before and one after the chunk.
     MapSize += 2 * PageSize;
-    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
+    uptr MapBeg = address_range.Init(MapSize);
+    CHECK_EQ(MapBeg, address_range.base());
+    CHECK_EQ(MapSize, address_range.size());
     if (MapBeg == ~static_cast<uptr>(0))
       return ReturnNullOrDieOnFailure::OnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
@@ -62,27 +65,31 @@
             PageSize;
         CHECK_GE(NewMapBeg, MapBeg);
         if (NewMapBeg != MapBeg) {
-          UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+          address_range.Unmap(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
           MapBeg = NewMapBeg;
+          CHECK_EQ(MapBeg, address_range.base());
         }
         UserEnd = UserBeg + UserSize;
       }
       uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
       if (NewMapEnd != MapEnd) {
-        UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+        address_range.Unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
         MapEnd = NewMapEnd;
       }
       MapSize = MapEnd - MapBeg;
+      CHECK_EQ(MapSize, address_range.size());
     }
     CHECK_LE(UserEnd, MapEnd - PageSize);
     // Actually mmap the memory, preserving the guard pages on either side.
-    CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
-        MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
+    CHECK_EQ(MapBeg + PageSize, address_range.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
     uptr Ptr = UserBeg - AlignedChunkHeaderSize;
-    SecondaryHeader *Header = getHeader(Ptr);
-    Header->MapBeg = MapBeg;
-    Header->MapSize = MapSize;
+    ReservedAddressRange *stored_range =
+        new(reinterpret_cast<void *>(Ptr)) ReservedAddressRange();
+    // TODO should probably be a move
+    Swap(address_range, *stored_range);
+    CHECK_EQ(MapBeg, stored_range->base());
+    CHECK_EQ(MapSize, stored_range->size());
+
     // The primary adds the whole class size to the stats when allocating a
     // chunk, so we will do something similar here. But we will not account for
     // the guard pages.
@@ -96,41 +103,35 @@
   }
 
   void Deallocate(AllocatorStats *Stats, void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
+    ReservedAddressRange *stored_range = getHeader(Ptr);
     {
       SpinMutexLock l(&StatsMutex);
-      Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
-      Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+      Stats->Sub(AllocatorStatAllocated, stored_range->size() - 2 * PageSize);
+      Stats->Sub(AllocatorStatMapped, stored_range->size() - 2 * PageSize);
     }
-    UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
+    UnmapOrDie(reinterpret_cast<void *>(stored_range->base()), stored_range->size());
   }
 
   uptr GetActuallyAllocatedSize(void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
+    ReservedAddressRange *stored_range = getHeader(Ptr);
     // Deduct PageSize as MapSize includes the trailing guard page.
-    uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
+    uptr MapEnd = reinterpret_cast<uptr>(stored_range->base()) + stored_range->size() - PageSize;
     return MapEnd - reinterpret_cast<uptr>(Ptr);
   }
 
  private:
-  // A Secondary allocated chunk header contains the base of the mapping and
-  // its size, which comprises the guard pages.
-  struct SecondaryHeader {
-    uptr MapBeg;
-    uptr MapSize;
-  };
-  // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
-  COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
-
-  SecondaryHeader *getHeader(uptr Ptr) {
-    return reinterpret_cast<SecondaryHeader *>(Ptr - sizeof(SecondaryHeader));
+  // Check that sizeof(ReservedAddressRange) is a multiple of MinAlignment.
+  COMPILER_CHECK((sizeof(ReservedAddressRange) & (MinAlignment - 1)) == 0);
+
+  ReservedAddressRange *getHeader(uptr Ptr) {
+    return reinterpret_cast<ReservedAddressRange *>(Ptr - sizeof(ReservedAddressRange));
   }
-  SecondaryHeader *getHeader(const void *Ptr) {
+  ReservedAddressRange *getHeader(const void *Ptr) {
     return getHeader(reinterpret_cast<uptr>(Ptr));
   }
 
-  const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
-  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
+  const uptr ReservedAddressRangeSize = sizeof(ReservedAddressRange);
+  const uptr HeadersSize = ReservedAddressRangeSize + AlignedChunkHeaderSize;
   uptr PageSize;
   SpinMutex StatsMutex;
 };
Index: compiler-rt/lib/scudo/scudo_platform.h
===================================================================
--- compiler-rt/lib/scudo/scudo_platform.h
+++ compiler-rt/lib/scudo/scudo_platform.h
@@ -15,6 +15,7 @@
 #define SCUDO_PLATFORM_H_
 
 #include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_placement_new.h"
 
 #if !SANITIZER_LINUX && !SANITIZER_FUCHSIA
 # error "The Scudo hardened allocator is not supported on this platform."
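Note on the scudo_allocator_secondary.h change: the per-chunk metadata that used to live in the hand-rolled SecondaryHeader is now a full ReservedAddressRange object, constructed in the header space ahead of the returned pointer with placement new (hence the new sanitizer_common/sanitizer_placement_new.h include) and read back through getHeader() at deallocation time. The standalone sketch below illustrates only that header-in-front-of-the-pointer pattern; Range, AllocateWithHeader, GetHeader, and DeallocateWithHeader are hypothetical stand-ins that use plain malloc/free instead of the real ReservedAddressRange and its mmap-backed mappings, and none of these names exist in compiler-rt.

// Illustrative sketch only -- not part of the patch above.
#include <cstddef>
#include <cstdio>
#include <cstdlib>
#include <new>

namespace {

// Stand-in for ReservedAddressRange: remembers the base and size of the
// backing allocation, which is all the deallocation path needs later.
struct Range {
  void *base;
  size_t size;
};

constexpr size_t kHeaderSize = sizeof(Range);

// Reserve backing storage, construct the header in place, and return the
// pointer that follows it (mirrors the role of Allocate()).
void *AllocateWithHeader(size_t user_size) {
  void *raw = std::malloc(kHeaderSize + user_size);
  if (!raw) return nullptr;
  new (raw) Range{raw, kHeaderSize + user_size};
  return static_cast<char *>(raw) + kHeaderSize;
}

// Walk back from the user pointer to the stored header (mirrors getHeader()).
Range *GetHeader(void *user_ptr) {
  return reinterpret_cast<Range *>(static_cast<char *>(user_ptr) - kHeaderSize);
}

// Read the header to find the whole backing allocation and release it
// (mirrors the role of Deallocate()).
void DeallocateWithHeader(void *user_ptr) {
  Range *header = GetHeader(user_ptr);
  void *raw = header->base;
  header->~Range();
  std::free(raw);
}

}  // namespace

int main() {
  void *p = AllocateWithHeader(64);
  if (!p) return 1;
  std::printf("backing base=%p size=%zu\n", GetHeader(p)->base,
              GetHeader(p)->size);
  DeallocateWithHeader(p);
  return 0;
}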