diff --git a/compiler-rt/lib/gwp_asan/common.h b/compiler-rt/lib/gwp_asan/common.h
--- a/compiler-rt/lib/gwp_asan/common.h
+++ b/compiler-rt/lib/gwp_asan/common.h
@@ -49,7 +49,7 @@
   static constexpr size_t kMaxTraceLengthToCollect = 128;
 
   // Records the given allocation metadata into this struct.
-  void RecordAllocation(uintptr_t Addr, size_t Size);
+  void RecordAllocation(uintptr_t Addr, size_t RequestedSize);
 
   // Record that this allocation is now deallocated.
   void RecordDeallocation();
@@ -70,7 +70,7 @@
   // valid, as the allocation has never occurred.
   uintptr_t Addr = 0;
   // Represents the actual size of the allocation.
-  size_t Size = 0;
+  size_t RequestedSize = 0;
 
   CallSiteInfo AllocationTrace;
   CallSiteInfo DeallocationTrace;
diff --git a/compiler-rt/lib/gwp_asan/common.cpp b/compiler-rt/lib/gwp_asan/common.cpp
--- a/compiler-rt/lib/gwp_asan/common.cpp
+++ b/compiler-rt/lib/gwp_asan/common.cpp
@@ -40,7 +40,7 @@
 void AllocationMetadata::RecordAllocation(uintptr_t AllocAddr,
                                           size_t AllocSize) {
   Addr = AllocAddr;
-  Size = AllocSize;
+  RequestedSize = AllocSize;
   IsDeallocated = false;
 
   AllocationTrace.ThreadID = getThreadID();
diff --git a/compiler-rt/lib/gwp_asan/crash_handler.cpp b/compiler-rt/lib/gwp_asan/crash_handler.cpp
--- a/compiler-rt/lib/gwp_asan/crash_handler.cpp
+++ b/compiler-rt/lib/gwp_asan/crash_handler.cpp
@@ -103,7 +103,7 @@
 
 size_t __gwp_asan_get_allocation_size(
     const gwp_asan::AllocationMetadata *AllocationMeta) {
-  return AllocationMeta->Size;
+  return AllocationMeta->RequestedSize;
 }
 
 uint64_t __gwp_asan_get_allocation_thread_id(
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -98,6 +98,11 @@
   // large for this pool to handle, or the requested size is zero.
   void *allocate(size_t Size);
 
+  // Allocate `ActualSize` in a guarded slot, but record that the allocation is
+  // of size `UserRequestedSize`. This is useful for a supporting allocator that
+  // needs GWP-ASan to support allocations with specific alignment.
+  void *allocate(size_t ActualSize, size_t UserRequestedSize);
+
   // Deallocate memory in a guarded slot. The provided pointer must have been
   // allocated using this pool. This will set the guarded slot as inaccessible.
   void deallocate(void *Ptr);
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -113,7 +113,7 @@
     const AllocationMetadata &Meta = Metadata[i];
     if (Meta.Addr && !Meta.IsDeallocated && Meta.Addr >= Start &&
         Meta.Addr < Start + Size)
-      Cb(Meta.Addr, Meta.Size, Arg);
+      Cb(Meta.Addr, Meta.RequestedSize, Arg);
   }
 }
 
@@ -139,6 +139,11 @@
 }
 
 void *GuardedPoolAllocator::allocate(size_t Size) {
+  return allocate(Size, Size);
+}
+
+void *GuardedPoolAllocator::allocate(size_t ActualSize,
+                                     size_t UserRequestedSize) {
   // GuardedPagePoolEnd == 0 when GWP-ASan is disabled. If we are disabled, fall
   // back to the supporting allocator.
   if (State.GuardedPagePoolEnd == 0) {
@@ -153,7 +158,7 @@
     return nullptr;
   ScopedRecursiveGuard SRG;
 
-  if (Size == 0 || Size > State.maximumAllocationSize())
+  if (ActualSize == 0 || ActualSize > State.maximumAllocationSize())
     return nullptr;
 
   size_t Index;
@@ -171,8 +176,8 @@
     AlignmentStrategy Align = AlignmentStrategy::DEFAULT;
     if (PerfectlyRightAlign)
       Align = AlignmentStrategy::PERFECT;
-    Ptr +=
-        State.maximumAllocationSize() - rightAlignedAllocationSize(Size, Align);
+    Ptr += State.maximumAllocationSize() -
+           rightAlignedAllocationSize(ActualSize, Align);
   }
 
   AllocationMetadata *Meta = addrToMetadata(Ptr);
@@ -181,9 +186,9 @@
   // unmapped.
   const size_t PageSize = State.PageSize;
   allocateInGuardedPool(reinterpret_cast<void *>(getPageAddr(Ptr, PageSize)),
-                        roundUpTo(Size, PageSize));
+                        roundUpTo(ActualSize, PageSize));
 
-  Meta->RecordAllocation(Ptr, Size);
+  Meta->RecordAllocation(Ptr, UserRequestedSize);
   Meta->AllocationTrace.RecordBacktrace(Backtrace);
 
   return reinterpret_cast<void *>(Ptr);
@@ -250,7 +255,7 @@
   ScopedLock L(PoolMutex);
   AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
   assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
-  return Meta->Size;
+  return Meta->RequestedSize;
 }
 
 AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
diff --git a/compiler-rt/lib/gwp_asan/optional/backtrace.h b/compiler-rt/lib/gwp_asan/optional/backtrace.h
--- a/compiler-rt/lib/gwp_asan/optional/backtrace.h
+++ b/compiler-rt/lib/gwp_asan/optional/backtrace.h
@@ -12,6 +12,11 @@
 #include "gwp_asan/optional/printf.h"
 #include "gwp_asan/options.h"
 
+// Implementations of PrintBacktrace and SegvBacktrace are only required if the
+// optional SEGV handler is provided. We provide a few default implementations
+// of these functions, as well as a default implementation of the Backtrace_t
+// function that's required for GWP-ASan to record backtraces.
+
 namespace gwp_asan {
 namespace backtrace {
 // ================================ Description ================================
diff --git a/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp b/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
--- a/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
+++ b/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
@@ -29,7 +29,7 @@
     size_t Slot = State.getNearestSlot(Addr);
     Metadata[Slot].Addr = Addr;
-    Metadata[Slot].Size = Size;
+    Metadata[Slot].RequestedSize = Size;
     Metadata[Slot].IsDeallocated = IsDeallocated;
     Metadata[Slot].AllocationTrace.ThreadID = 123;
     Metadata[Slot].DeallocationTrace.ThreadID = 321;
 
@@ -80,7 +80,8 @@
         __gwp_asan_get_metadata(&State, Metadata, ErrorPtr);
     EXPECT_NE(nullptr, Meta);
     EXPECT_EQ(Metadata[Index].Addr, __gwp_asan_get_allocation_address(Meta));
-    EXPECT_EQ(Metadata[Index].Size, __gwp_asan_get_allocation_size(Meta));
+    EXPECT_EQ(Metadata[Index].RequestedSize,
+              __gwp_asan_get_allocation_size(Meta));
     EXPECT_EQ(Metadata[Index].AllocationTrace.ThreadID,
               __gwp_asan_get_allocation_thread_id(Meta));
 
diff --git a/compiler-rt/lib/gwp_asan/tests/harness.h b/compiler-rt/lib/gwp_asan/tests/harness.h
--- a/compiler-rt/lib/gwp_asan/tests/harness.h
+++ b/compiler-rt/lib/gwp_asan/tests/harness.h
@@ -35,7 +35,6 @@
 
 // First call returns true, all the following calls return false.
 bool OnlyOnce();
-
 }; // namespace test
 }; // namespace gwp_asan
 
diff --git a/compiler-rt/lib/scudo/scudo_allocator.cpp b/compiler-rt/lib/scudo/scudo_allocator.cpp
--- a/compiler-rt/lib/scudo/scudo_allocator.cpp
+++ b/compiler-rt/lib/scudo/scudo_allocator.cpp
@@ -303,18 +303,29 @@
                  bool ForceZeroContents = false) {
     initThreadMaybe();
 
-#ifdef GWP_ASAN_HOOKS
-    if (UNLIKELY(GuardedAlloc.shouldSample())) {
-      if (void *Ptr = GuardedAlloc.allocate(Size))
-        return Ptr;
-    }
-#endif // GWP_ASAN_HOOKS
-
     if (UNLIKELY(Alignment > MaxAlignment)) {
       if (AllocatorMayReturnNull())
         return nullptr;
       reportAllocationAlignmentTooBig(Alignment, MaxAlignment);
     }
+
+#ifdef GWP_ASAN_HOOKS
+    if (UNLIKELY(GuardedAlloc.shouldSample())) {
+      uptr RequiredAlignment = Alignment ? Alignment : 1;
+      uptr RequiredSize = RoundUpTo(Size ? Size : 1, RequiredAlignment);
+      if (void *Ptr = GuardedAlloc.allocate(RequiredSize, Size)) {
+        uptr AlignedUptr =
+            RoundUpTo(reinterpret_cast<uptr>(Ptr), RequiredAlignment);
+        void *AlignedPtr = reinterpret_cast<void *>(AlignedUptr);
+
+        if (SCUDO_CAN_USE_HOOKS && &__sanitizer_malloc_hook)
+          __sanitizer_malloc_hook(AlignedPtr, Size);
+
+        return AlignedPtr;
+      }
+    }
+#endif // GWP_ASAN_HOOKS
+
     if (UNLIKELY(Alignment < MinAlignment))
       Alignment = MinAlignment;
 
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -280,8 +280,17 @@
 
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.shouldSample())) {
-      if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment)))
-        return Ptr;
+      // Scudo allocations can request specific alignment, which GWP-ASan
+      // should respect.
+      if (void *Ptr = GuardedAlloc.allocate(roundUpTo(Size, Alignment), Size)) {
+        const uptr AlignedUptr =
+            roundUpTo(reinterpret_cast<uptr>(Ptr), Alignment);
+        void *AlignedPtr = reinterpret_cast<void *>(AlignedUptr);
+        if (UNLIKELY(&__scudo_allocate_hook))
+          __scudo_allocate_hook(AlignedPtr, Size);
+        Stats.add(StatAllocated, Size);
+        return AlignedPtr;
+      }
     }
 #endif // GWP_ASAN_HOOKS
 
@@ -480,18 +489,21 @@
     // being destroyed properly. Any other heap operation will do a full init.
     initThreadMaybe(/*MinimalInit=*/true);
 
+    if (UNLIKELY(&__scudo_deallocate_hook))
+      __scudo_deallocate_hook(Ptr);
+
+    if (UNLIKELY(!Ptr))
+      return;
+
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
+      // Query the size before releasing the slot, so a concurrent allocation
+      // cannot recycle it between the two calls.
+      Stats.add(StatFree, GuardedAlloc.getSize(Ptr));
       GuardedAlloc.deallocate(Ptr);
       return;
     }
 #endif // GWP_ASAN_HOOKS
 
-    if (UNLIKELY(&__scudo_deallocate_hook))
-      __scudo_deallocate_hook(Ptr);
-
-    if (UNLIKELY(!Ptr))
-      return;
-
     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
 
@@ -547,6 +559,7 @@
       if (NewPtr)
         memcpy(NewPtr, OldPtr, (NewSize < OldSize) ? NewSize : OldSize);
       GuardedAlloc.deallocate(OldPtr);
+      Stats.add(StatFree, OldSize);
       return NewPtr;
     }
 #endif // GWP_ASAN_HOOKS
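
Reviewer note: the over-allocate-and-realign pattern that both Scudo front ends
adopt above is easier to see in isolation. The sketch below is hypothetical --
`SlotAllocator` stands in for `GuardedPoolAllocator`, `std::aligned_alloc`
stands in for a page-backed guarded slot, and `alignedGuardedAlloc` is an
illustrative helper that is not part of the patch -- but the
`RequiredAlignment`/`RequiredSize`/`AlignedUptr` arithmetic mirrors the hunks
above.

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Power-of-two round-up, mirroring scudo's RoundUpTo/roundUpTo helpers.
static uintptr_t roundUpTo(uintptr_t Value, uintptr_t Boundary) {
  return (Value + Boundary - 1) & ~(Boundary - 1);
}

// Hypothetical stand-in for GuardedPoolAllocator. Like a real guarded slot,
// the returned memory starts on a page boundary, and only the size the user
// asked for is recorded (what __gwp_asan_get_allocation_size would report).
struct SlotAllocator {
  static constexpr size_t PageSize = 4096;
  size_t RecordedSize = 0;
  void *Base = nullptr;

  void *allocate(size_t ActualSize, size_t UserRequestedSize) {
    if (ActualSize == 0 || ActualSize > PageSize) // maximumAllocationSize()
      return nullptr;
    RecordedSize = UserRequestedSize;
    Base = std::aligned_alloc(PageSize, PageSize);
    return Base;
  }
  void deallocate(void *) { std::free(Base); }
};

// The pattern from the patch: over-allocate so that rounding the returned
// pointer up to the requested alignment still leaves Size usable bytes.
void *alignedGuardedAlloc(SlotAllocator &GuardedAlloc, size_t Size,
                          size_t Alignment) {
  const size_t RequiredAlignment = Alignment ? Alignment : 1;
  const size_t RequiredSize = roundUpTo(Size ? Size : 1, RequiredAlignment);
  void *Ptr = GuardedAlloc.allocate(RequiredSize, Size);
  if (!Ptr)
    return nullptr;
  // Slots start page-aligned, so for alignments up to a page this is a no-op
  // here; in real GWP-ASan a right-aligned slot may actually need the nudge.
  return reinterpret_cast<void *>(
      roundUpTo(reinterpret_cast<uintptr_t>(Ptr), RequiredAlignment));
}

int main() {
  SlotAllocator GuardedAlloc;
  void *P = alignedGuardedAlloc(GuardedAlloc, /*Size=*/24, /*Alignment=*/64);
  assert(P && reinterpret_cast<uintptr_t>(P) % 64 == 0);
  assert(GuardedAlloc.RecordedSize == 24); // reports show the requested size
  GuardedAlloc.deallocate(P);
  return 0;
}

Because `RequiredSize` is rounded up to the alignment before the slot is
sized, rounding the returned pointer up can never move it past the usable
bytes. Alignments larger than the slot simply make `allocate` return nullptr,
falling through to the supporting allocator. Recording `UserRequestedSize`
separately keeps error reports and `getSize()` showing the size the caller
actually asked for, not the padded size.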