diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h --- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h +++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h @@ -124,15 +124,30 @@ // memory into this process in a platform-specific way. Pointer and size // arguments are expected to be page-aligned. These functions will never // return on error, instead electing to kill the calling process on failure. - // Note that memory is initially mapped inaccessible. In order for RW - // mappings, call mapMemory() followed by markReadWrite() on the returned - // pointer. Each mapping is named on platforms that support it, primarily - // Android. This name must be a statically allocated string, as the Android - // kernel uses the string pointer directly. - void *mapMemory(size_t Size, const char *Name) const; - void unmapMemory(void *Ptr, size_t Size, const char *Name) const; - void markReadWrite(void *Ptr, size_t Size, const char *Name) const; - void markInaccessible(void *Ptr, size_t Size, const char *Name) const; + // The pool memory is initially reserved and inaccessible, and RW mappings are + // subsequently created and destroyed via allocateInGuardedPool() and + // deallocateInGuardedPool(). Each mapping is named on platforms that support + // it, primarily Android. This name must be a statically allocated string, as + // the Android kernel uses the string pointer directly. + void *map(size_t Size, const char *Name) const; + void unmap(void *Ptr, size_t Size) const; + + // The pool is managed separately, as some platforms (particularly Fuchsia) + // manage virtual memory regions as a chunk where individual pages can still + // have separate permissions. These platforms maintain metadata about the + // region in order to perform operations. The pool is unique as it's the only + // thing in GWP-ASan that treats pages in a single VM region on an individual + // basis for page protection. 
+ // The pointer returned by reserveGuardedPool() is the reserved address range + // of (at least) Size bytes. + void *reserveGuardedPool(size_t Size); + // allocateInGuardedPool() Ptr and Size must be a subrange of the previously + // reserved pool range. + void allocateInGuardedPool(void *Ptr, size_t Size) const; + // deallocateInGuardedPool() Ptr and Size must be an exact pair previously + // passed to allocateInGuardedPool(). + void deallocateInGuardedPool(void *Ptr, size_t Size) const; + void unreserveGuardedPool(); // Get the page size from the platform-specific implementation. Only needs to // be called once, and the result should be cached in PageSize in this class. diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp --- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp +++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp @@ -45,6 +45,10 @@ return SingletonPtr; } +static size_t roundUpTo(size_t Size, size_t Boundary) { + return (Size + Boundary - 1) & ~(Boundary - 1); +} + void GuardedPoolAllocator::init(const options::Options &Opts) { // Note: We return from the constructor here if GWP-ASan is not available. // This will stop heap-allocation of class members, as well as mmap() of the @@ -63,25 +67,29 @@ State.MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations; - State.PageSize = getPlatformPageSize(); + const size_t PageSize = getPlatformPageSize(); + // getPageAddr() and roundUpTo() assume the page size to be a power of 2. 
+ assert((PageSize & (PageSize - 1)) == 0); + State.PageSize = PageSize; PerfectlyRightAlign = Opts.PerfectlyRightAlign; size_t PoolBytesRequired = - State.PageSize * (1 + State.MaxSimultaneousAllocations) + + PageSize * (1 + State.MaxSimultaneousAllocations) + State.MaxSimultaneousAllocations * State.maximumAllocationSize(); - void *GuardedPoolMemory = mapMemory(PoolBytesRequired, kGwpAsanGuardPageName); + assert(PoolBytesRequired % PageSize == 0); + void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired); - size_t BytesRequired = State.MaxSimultaneousAllocations * sizeof(*Metadata); + size_t BytesRequired = + roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), PageSize); Metadata = reinterpret_cast<AllocationMetadata *>( - mapMemory(BytesRequired, kGwpAsanMetadataName)); - markReadWrite(Metadata, BytesRequired, kGwpAsanMetadataName); + map(BytesRequired, kGwpAsanMetadataName)); // Allocate memory and set up the free pages queue. - BytesRequired = State.MaxSimultaneousAllocations * sizeof(*FreeSlots); - FreeSlots = reinterpret_cast<size_t *>( - mapMemory(BytesRequired, kGwpAsanFreeSlotsName)); - markReadWrite(FreeSlots, BytesRequired, kGwpAsanFreeSlotsName); + BytesRequired = roundUpTo( + State.MaxSimultaneousAllocations * sizeof(*FreeSlots), PageSize); + FreeSlots = + reinterpret_cast<size_t *>(map(BytesRequired, kGwpAsanFreeSlotsName)); // Multiply the sample rate by 2 to give a good, fast approximation for (1 / // SampleRate) chance of sampling. 
@@ -120,21 +128,20 @@ void GuardedPoolAllocator::uninitTestOnly() { if (State.GuardedPagePool) { - unmapMemory(reinterpret_cast<void *>(State.GuardedPagePool), - State.GuardedPagePoolEnd - State.GuardedPagePool, - kGwpAsanGuardPageName); + unreserveGuardedPool(); State.GuardedPagePool = 0; State.GuardedPagePoolEnd = 0; } if (Metadata) { - unmapMemory(Metadata, State.MaxSimultaneousAllocations * sizeof(*Metadata), - kGwpAsanMetadataName); + unmap(Metadata, + roundUpTo(State.MaxSimultaneousAllocations * sizeof(*Metadata), + State.PageSize)); Metadata = nullptr; } if (FreeSlots) { - unmapMemory(FreeSlots, - State.MaxSimultaneousAllocations * sizeof(*FreeSlots), - kGwpAsanFreeSlotsName); + unmap(FreeSlots, + roundUpTo(State.MaxSimultaneousAllocations * sizeof(*FreeSlots), + State.PageSize)); FreeSlots = nullptr; } } @@ -184,8 +191,9 @@ // If a slot is multiple pages in size, and the allocation takes up a single // page, we can improve overflow detection by leaving the unused pages as // unmapped. - markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr, State.PageSize)), - Size, kGwpAsanAliveSlotName); + const size_t PageSize = State.PageSize; + allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)), + roundUpTo(Size, PageSize)); Meta->RecordAllocation(Ptr, Size); Meta->AllocationTrace.RecordBacktrace(Backtrace); @@ -241,8 +249,8 @@ } } - markInaccessible(reinterpret_cast<void *>(SlotStart), - State.maximumAllocationSize(), kGwpAsanGuardPageName); + deallocateInGuardedPool(reinterpret_cast<void *>(SlotStart), + State.maximumAllocationSize()); // And finally, lock again to release the slot back into the pool. 
ScopedLock L(PoolMutex); diff --git a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp --- a/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp +++ b/compiler-rt/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp @@ -25,6 +25,7 @@ #define PR_SET_VMA_ANON_NAME 0 #endif // ANDROID +namespace { void MaybeSetMappingName(void *Mapping, size_t Size, const char *Name) { #ifdef ANDROID prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, Mapping, Size, Name); @@ -32,44 +33,64 @@ // Anonymous mapping names are only supported on Android. return; } +} // anonymous namespace namespace gwp_asan { void GuardedPoolAllocator::initPRNG() { - ThreadLocals.RandomState = time(nullptr) + getThreadID(); + ThreadLocals.RandomState = + static_cast<uint32_t>(time(nullptr) + getThreadID()); } -void *GuardedPoolAllocator::mapMemory(size_t Size, const char *Name) const { - void *Ptr = - mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); +void *GuardedPoolAllocator::map(size_t Size, const char *Name) const { + assert((Size % State.PageSize) == 0); + void *Ptr = mmap(nullptr, Size, PROT_READ | PROT_WRITE, + MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); Check(Ptr != MAP_FAILED, "Failed to map guarded pool allocator memory"); MaybeSetMappingName(Ptr, Size, Name); return Ptr; } -void GuardedPoolAllocator::unmapMemory(void *Ptr, size_t Size, - const char *Name) const { +void GuardedPoolAllocator::unmap(void *Ptr, size_t Size) const { + assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0); + assert((Size % State.PageSize) == 0); Check(munmap(Ptr, Size) == 0, "Failed to unmap guarded pool allocator memory."); - MaybeSetMappingName(Ptr, Size, Name); } -void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size, - const char *Name) const { +void *GuardedPoolAllocator::reserveGuardedPool(size_t Size) { + assert((Size % State.PageSize) == 0); + void *Ptr = + mmap(nullptr, 
Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); + Check(Ptr != MAP_FAILED, "Failed to reserve guarded pool allocator memory"); + MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName); + return Ptr; +} + +void GuardedPoolAllocator::unreserveGuardedPool() { + unmap(reinterpret_cast<void *>(State.GuardedPagePool), + State.GuardedPagePoolEnd - State.GuardedPagePool); +} + +void GuardedPoolAllocator::allocateInGuardedPool(void *Ptr, size_t Size) const { + assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0); + assert((Size % State.PageSize) == 0); Check(mprotect(Ptr, Size, PROT_READ | PROT_WRITE) == 0, - "Failed to set guarded pool allocator memory at as RW."); - MaybeSetMappingName(Ptr, Size, Name); + "Failed to allocate in guarded pool allocator memory"); + MaybeSetMappingName(Ptr, Size, kGwpAsanAliveSlotName); } -void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size, - const char *Name) const { +void GuardedPoolAllocator::deallocateInGuardedPool(void *Ptr, + size_t Size) const { + assert((reinterpret_cast<uintptr_t>(Ptr) % State.PageSize) == 0); + assert((Size % State.PageSize) == 0); // mmap() a PROT_NONE page over the address to release it to the system, if // we used mprotect() here the system would count pages in the quarantine // against the RSS. Check(mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1, 0) != MAP_FAILED, - "Failed to set guarded pool allocator memory as inaccessible."); - MaybeSetMappingName(Ptr, Size, Name); + "Failed to deallocate in guarded pool allocator memory"); + MaybeSetMappingName(Ptr, Size, kGwpAsanGuardPageName); } size_t GuardedPoolAllocator::getPlatformPageSize() { @@ -87,5 +108,4 @@ }; pthread_atfork(Disable, Enable, Enable); } - } // namespace gwp_asan