Index: lib/sanitizer_common/sanitizer_common.h
===================================================================
--- lib/sanitizer_common/sanitizer_common.h
+++ lib/sanitizer_common/sanitizer_common.h
@@ -133,8 +133,8 @@
   uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
   uptr Map(uptr fixed_addr, uptr size, bool tolerate_enomem = false);
   void Unmap(uptr addr, uptr size);
-  const void *base() { return base_; }
-  const uptr size() { return size_; }
+  void *base() { return base_; }
+  uptr size() { return size_; }

  private:
   void* base_;
Index: lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- lib/sanitizer_common/sanitizer_fuchsia.cc
+++ lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -260,6 +260,10 @@
   // Detect overflows.
   CHECK_LE(size, (base_as_uptr + size_) - addr);
   UnmapOrDie(reinterpret_cast<void *>(addr), size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(reinterpret_cast<uptr>(addr) + size);
+  }
+  size_ = size_ - size;
 }

 // MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
Index: lib/sanitizer_common/sanitizer_posix_libcdep.cc
===================================================================
--- lib/sanitizer_common/sanitizer_posix_libcdep.cc
+++ lib/sanitizer_common/sanitizer_posix_libcdep.cc
@@ -367,6 +367,10 @@
   // Detect overflows.
   CHECK_LE(size, (base_as_uptr + size_) - addr);
   UnmapOrDie(reinterpret_cast<void *>(addr), size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(reinterpret_cast<uptr>(addr) + size);
+  }
+  size_ = size_ - size;
 }

 void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name) {
Index: lib/sanitizer_common/sanitizer_win.cc
===================================================================
--- lib/sanitizer_common/sanitizer_win.cc
+++ lib/sanitizer_common/sanitizer_win.cc
@@ -253,6 +253,10 @@
   // Detect overflows.
   CHECK_LE(size, (base_as_uptr + size_) - addr);
   UnmapOrDie(reinterpret_cast<void *>(addr), size);
+  if (addr_as_void == base_) {
+    base_ = reinterpret_cast<void *>(reinterpret_cast<uptr>(addr) + size);
+  }
+  size_ = size_ - size;
 }

 void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size) {
Index: lib/sanitizer_common/tests/sanitizer_common_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_common_test.cc
+++ lib/sanitizer_common/tests/sanitizer_common_test.cc
@@ -366,8 +366,6 @@
   uptr base_addr = address_range.Init(init_size);
   CHECK_NE(base_addr, (void*)-1);
   CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));
-<<<<<<< HEAD
-<<<<<<< HEAD
   // Unmapping the entire range should succeed.
 #if !SANITIZER_WINDOWS
@@ -385,19 +383,6 @@
   EXPECT_EQ(FlushViewOfFile(reinterpret_cast<void *>(base_addr), PageSize * 4),
             0);
 #endif
-=======
-=======
->>>>>>> Introduce ReservedAddressRange to sanitizer_common.
-  // Unmapping the entire range should succeed.
-  EXPECT_EQ(msync(reinterpret_cast<void *>(base_addr), PageSize * 4,
-                  MS_ASYNC), 0);
-  address_range.Unmap(base_addr, PageSize * 4);
-  EXPECT_EQ(msync(reinterpret_cast<void *>(base_addr), PageSize * 4,
-                  MS_ASYNC), -1);
-<<<<<<< HEAD
->>>>>>> Introduce ReservedAddressRange to sanitizer_common.
-=======
->>>>>>> Introduce ReservedAddressRange to sanitizer_common.
   // Remap that range in.
   CHECK_EQ(base_addr, address_range.Map(base_addr, init_size));
Index: lib/scudo/scudo_allocator_secondary.h
===================================================================
--- lib/scudo/scudo_allocator_secondary.h
+++ lib/scudo/scudo_allocator_secondary.h
@@ -29,17 +29,18 @@
   }

   void *Allocate(AllocatorStats *Stats, uptr Size, uptr Alignment) {
+    ReservedAddressRange address_range;
     uptr UserSize = Size - AlignedChunkHeaderSize;
     // The Scudo frontend prevents us from allocating more than
     // MaxAllowedMallocSize, so integer overflow checks would be superfluous.
-    uptr MapSize = Size + SecondaryHeaderSize;
+    uptr MapSize = Size + ReservedAddressRangeSize;
     if (Alignment > MinAlignment)
       MapSize += Alignment;
     MapSize = RoundUpTo(MapSize, PageSize);
     // Account for 2 guard pages, one before and one after the chunk.
     MapSize += 2 * PageSize;

-    uptr MapBeg = reinterpret_cast<uptr>(MmapNoAccess(MapSize));
+    uptr MapBeg = address_range.Init(MapSize);
     if (MapBeg == ~static_cast<uptr>(0))
       return ReturnNullOrDieOnFailure::OnOOM();
     // A page-aligned pointer is assumed after that, so check it now.
@@ -62,14 +63,14 @@
           PageSize;
       CHECK_GE(NewMapBeg, MapBeg);
       if (NewMapBeg != MapBeg) {
-        UnmapOrDie(reinterpret_cast<void *>(MapBeg), NewMapBeg - MapBeg);
+        address_range.Unmap(MapBeg, NewMapBeg - MapBeg);
         MapBeg = NewMapBeg;
       }
       UserEnd = UserBeg + UserSize;
     }
     uptr NewMapEnd = RoundUpTo(UserEnd, PageSize) + PageSize;
     if (NewMapEnd != MapEnd) {
-      UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd);
+      address_range.Unmap(NewMapEnd, MapEnd - NewMapEnd);
       MapEnd = NewMapEnd;
     }
     MapSize = MapEnd - MapBeg;
@@ -77,12 +78,10 @@
     CHECK_LE(UserEnd, MapEnd - PageSize);

     // Actually mmap the memory, preserving the guard pages on either side.
-    CHECK_EQ(MapBeg + PageSize, reinterpret_cast<uptr>(
-        MmapFixedOrDie(MapBeg + PageSize, MapSize - 2 * PageSize)));
+    CHECK_EQ(MapBeg + PageSize, address_range.Map(MapBeg + PageSize, MapSize - 2 * PageSize));
     uptr Ptr = UserBeg - AlignedChunkHeaderSize;
-    SecondaryHeader *Header = getHeader(Ptr);
-    Header->MapBeg = MapBeg;
-    Header->MapSize = MapSize;
+    ALIGNED(MinAlignment) ReservedAddressRange *stored_range = getReservedAddressRange(Ptr);
+    Swap(address_range, *stored_range);
     // The primary adds the whole class size to the stats when allocating a
     // chunk, so we will do something similar here. But we will not account for
     // the guard pages.
@@ -96,41 +95,35 @@
   }

   void Deallocate(AllocatorStats *Stats, void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
+    ReservedAddressRange *stored_range = getReservedAddressRange(Ptr);
     {
       SpinMutexLock l(&StatsMutex);
-      Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
-      Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+      Stats->Sub(AllocatorStatAllocated, stored_range->size() - 2 * PageSize);
+      Stats->Sub(AllocatorStatMapped, stored_range->size() - 2 * PageSize);
     }
-    UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
+    UnmapOrDie(reinterpret_cast<void *>(stored_range->base()), stored_range->size());
   }

   uptr GetActuallyAllocatedSize(void *Ptr) {
-    SecondaryHeader *Header = getHeader(Ptr);
+    ReservedAddressRange *stored_range = getReservedAddressRange(Ptr);
     // Deduct PageSize as MapSize includes the trailing guard page.
-    uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
+    uptr MapEnd = reinterpret_cast<uptr>(stored_range->base()) + stored_range->size() - PageSize;
     return MapEnd - reinterpret_cast<uptr>(Ptr);
   }

  private:
-  // A Secondary allocated chunk header contains the base of the mapping and
-  // its size, which comprises the guard pages.
-  struct SecondaryHeader {
-    uptr MapBeg;
-    uptr MapSize;
-  };
-  // Check that sizeof(SecondaryHeader) is a multiple of MinAlignment.
-  COMPILER_CHECK((sizeof(SecondaryHeader) & (MinAlignment - 1)) == 0);
+  // Check that sizeof(ReservedAddressRange) is a multiple of MinAlignment.
+  COMPILER_CHECK((sizeof(ReservedAddressRange) & (MinAlignment - 1)) == 0);

-  SecondaryHeader *getHeader(uptr Ptr) {
-    return reinterpret_cast<SecondaryHeader *>(Ptr - sizeof(SecondaryHeader));
+  ReservedAddressRange *getReservedAddressRange(uptr Ptr) {
+    return reinterpret_cast<ReservedAddressRange *>(Ptr - sizeof(ReservedAddressRange));
   }
-  SecondaryHeader *getHeader(const void *Ptr) {
-    return getHeader(reinterpret_cast<uptr>(Ptr));
+  ReservedAddressRange *getReservedAddressRange(const void *Ptr) {
+    return getReservedAddressRange(reinterpret_cast<uptr>(Ptr));
   }

-  const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
-  const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
+  const uptr ReservedAddressRangeSize = sizeof(ReservedAddressRange);
+  const uptr HeadersSize = ReservedAddressRangeSize + AlignedChunkHeaderSize;
   uptr PageSize;
   SpinMutex StatsMutex;
 };
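For reference, the sketch below shows how the ReservedAddressRange interface touched by this patch (Init/Map/Unmap plus the now non-const base()/size() accessors) is intended to be driven, mirroring what the scudo secondary allocator does above. It is illustrative only and not part of the patch: the reservation size and guard-page layout are arbitrary example values, and GetPageSizeCached/CHECK_* are the existing sanitizer_common helpers.

// Rough usage sketch (not part of the patch): reserve a range, commit the
// middle, then trim the leading guard page and observe base()/size() tracking
// the bookkeeping added to ReservedAddressRange::Unmap in this change.
ReservedAddressRange range;
uptr page = GetPageSizeCached();
uptr reserved = page * 4;          // example reservation size
uptr beg = range.Init(reserved);   // reserve address space, no access yet
CHECK_NE(beg, ~static_cast<uptr>(0));
// Commit everything except one guard page on each side.
CHECK_EQ(beg + page, range.Map(beg + page, reserved - 2 * page));
// Unmapping from the front advances base() and shrinks size().
range.Unmap(beg, page);
CHECK_EQ(reinterpret_cast<uptr>(range.base()), beg + page);
CHECK_EQ(range.size(), reserved - page);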