diff --git a/compiler-rt/lib/gwp_asan/common.h b/compiler-rt/lib/gwp_asan/common.h
--- a/compiler-rt/lib/gwp_asan/common.h
+++ b/compiler-rt/lib/gwp_asan/common.h
@@ -35,7 +35,7 @@
   uint8_t Magic[4] = {};
   // Update the version number when the AllocatorState or AllocationMetadata
   // change.
-  static constexpr uint16_t kAllocatorVersion = 1;
+  static constexpr uint16_t kAllocatorVersion = 2;
   uint16_t Version = 0;
   uint16_t Reserved = 0;
 };
@@ -98,6 +98,12 @@
 
   // Whether this allocation has been deallocated yet.
   bool IsDeallocated = false;
+
+  // In recoverable mode, whether this allocation has had a crash associated
+  // with it. A crashed allocation permanently occupies its slot and will never
+  // have another crash reported for it.
+  bool HasCrashed = false;
 };
 
 // This holds the state that's shared between the GWP-ASan allocator and the
@@ -127,6 +133,11 @@
   // must be within memory owned by this pool, else the result is undefined.
   bool isGuardPage(uintptr_t Ptr) const;
 
+  // Returns the address that's used by __gwp_asan_get_internal_crash_address()
+  // and GPA::raiseInternallyDetectedError() to communicate that the SEGV in
+  // question comes from an internally-detected error.
+  uintptr_t internallyDetectedErrorFaultAddress() const;
+
   // The number of guarded slots that this pool holds.
   size_t MaxSimultaneousAllocations = 0;
 
diff --git a/compiler-rt/lib/gwp_asan/common.cpp b/compiler-rt/lib/gwp_asan/common.cpp
--- a/compiler-rt/lib/gwp_asan/common.cpp
+++ b/compiler-rt/lib/gwp_asan/common.cpp
@@ -105,4 +105,8 @@
   return addrToSlot(this, Ptr + PageSize); // Round up.
 }
 
+uintptr_t AllocatorState::internallyDetectedErrorFaultAddress() const {
+  return GuardedPagePoolEnd - 0x10;
+}
+
 } // namespace gwp_asan
diff --git a/compiler-rt/lib/gwp_asan/crash_handler.h b/compiler-rt/lib/gwp_asan/crash_handler.h
--- a/compiler-rt/lib/gwp_asan/crash_handler.h
+++ b/compiler-rt/lib/gwp_asan/crash_handler.h
@@ -46,12 +46,18 @@
     const gwp_asan::AllocationMetadata *Metadata, uintptr_t ErrorPtr);
 
-// For internally-detected errors (double free, invalid free), this function
-// returns the pointer that the error occurred at. If the error is unrelated to
-// GWP-ASan, or if the error was caused by a non-internally detected failure,
-// this function returns zero.
+// Given the fault address from the signal handler, this function returns the
+// following values:
+// 1. If the crash was caused by an internally-detected error (invalid free,
+//    double free), this function returns the pointer that was used for the
+//    internally-detected bad operation (i.e. the pointer given to free()).
+// 2. For externally-detected crashes (use-after-free, buffer-overflow), this
+//    function returns zero.
+// 3. If GWP-ASan wasn't responsible for the crash at all, this function also
+//    returns zero.
 uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State);
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+                                      uintptr_t ErrorPtr);
 
 // Returns a pointer to the metadata for the allocation that's responsible for
 // the crash. This metadata should not be dereferenced directly due to API
diff --git a/compiler-rt/lib/gwp_asan/crash_handler.cpp b/compiler-rt/lib/gwp_asan/crash_handler.cpp
--- a/compiler-rt/lib/gwp_asan/crash_handler.cpp
+++ b/compiler-rt/lib/gwp_asan/crash_handler.cpp
@@ -31,7 +31,15 @@
 }
 
 uintptr_t
-__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State) {
+__gwp_asan_get_internal_crash_address(const gwp_asan::AllocatorState *State,
+                                      uintptr_t ErrorPtr) {
+  // There can be a race between internally- and externally-raised faults. The
+  // fault address from the signal handler is used to discriminate whether it's
+  // internally- or externally-raised, and the pool maintains a special page at
+  // the end of the GuardedPagePool specifically for the internally-raised
+  // faults.
+  if (ErrorPtr != State->internallyDetectedErrorFaultAddress())
+    return 0u;
   return State->FailureAddress;
 }
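To make the new contract concrete, here is a minimal sketch (not part of the patch) of how a consumer of the crash-handler API might use the two-argument form. The ClassifyGwpAsanFault helper and the way State/Metadata are obtained are illustrative assumptions; only the __gwp_asan_* calls are the existing API from crash_handler.h.

    // Sketch only: assumes State and Metadata were already fetched, e.g. via
    // GPA.getAllocatorState() and GPA.getMetadataRegion().
    #include "gwp_asan/crash_handler.h"

    #include <stdint.h>

    void ClassifyGwpAsanFault(const gwp_asan::AllocatorState *State,
                              const gwp_asan::AllocationMetadata *Metadata,
                              uintptr_t FaultAddr) {
      if (!__gwp_asan_error_is_mine(State, FaultAddr))
        return; // Not a GWP-ASan fault; defer to other handlers.

      // Non-zero only for the allocator's own "signal to self" (double free or
      // invalid free); in that case it is the pointer that was passed to free().
      uintptr_t InternalPtr =
          __gwp_asan_get_internal_crash_address(State, FaultAddr);
      uintptr_t ErrorPtr = InternalPtr ? InternalPtr : FaultAddr;

      gwp_asan::Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
      const gwp_asan::AllocationMetadata *AllocMeta =
          __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
      (void)E;
      (void)AllocMeta; // Feed these into whatever report printer you already use.
    }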
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.h
@@ -115,6 +115,11 @@
   // Returns a pointer to the AllocatorState region.
   const AllocatorState *getAllocatorState() const { return &State; }
 
+  // Under the recoverable mode, the signal handler should call the following
+  // hooks pre- and post-dumping the crash report, respectively.
+  void handleRecoverablePreCrashReport(void *Ptr);
+  void handleRecoverablePostCrashReport(void *Ptr);
+
   // Exposed as protected for testing.
 protected:
   // Returns the actual allocation size required to service an allocation with
@@ -185,7 +190,7 @@
   // Raise a SEGV and set the corresponding fields in the Allocator's State in
   // order to tell the crash handler what happened. Used when errors are
   // detected internally (Double Free, Invalid Free).
-  void trapOnAddress(uintptr_t Address, Error E);
+  void raiseInternallyDetectedError(uintptr_t Address, Error E);
 
   static GuardedPoolAllocator *getSingleton();
 
diff --git a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
--- a/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
+++ b/compiler-rt/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -8,6 +8,7 @@
 
 #include "gwp_asan/guarded_pool_allocator.h"
 
+#include "gwp_asan/crash_handler.h"
 #include "gwp_asan/options.h"
 #include "gwp_asan/utilities.h"
 
@@ -73,8 +74,15 @@
   assert((PageSize & (PageSize - 1)) == 0);
   State.PageSize = PageSize;
 
+  // Number of pages required =
+  // +  MaxSimultaneousAllocations * maximumAllocationSize (N pages per slot)
+  // +  MaxSimultaneousAllocations (one guard on the left side of each slot)
+  // +  1 (an extra guard page at the end of the pool, on the right side)
+  // +  1 (an extra page that's used for reporting internally-detected crashes,
+  //      like double free and invalid free, to the signal handler; see
+  //      raiseInternallyDetectedError() for more info)
   size_t PoolBytesRequired =
-      PageSize * (1 + State.MaxSimultaneousAllocations) +
+      PageSize * (2 + State.MaxSimultaneousAllocations) +
       State.MaxSimultaneousAllocations * State.maximumAllocationSize();
   assert(PoolBytesRequired % PageSize == 0);
   void *GuardedPoolMemory = reserveGuardedPool(PoolBytesRequired);
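As a quick sanity check of the layout described in the comment above, the arithmetic below mirrors the PoolBytesRequired formula under the illustrative assumptions of a 4 KiB page and maximumAllocationSize() being one page; the constants are hypothetical, not values taken from the allocator.

    #include <cstddef>

    constexpr size_t kPageSize = 0x1000;               // Assumed 4 KiB pages.
    constexpr size_t kMaxSimultaneousAllocations = 16; // Illustrative only.
    constexpr size_t kPoolBytesRequired =
        kPageSize * (2 + kMaxSimultaneousAllocations) +
        kMaxSimultaneousAllocations * kPageSize;
    // 16 slot pages + 16 left-hand guard pages + 1 trailing guard page
    // + 1 page reserved for internally-detected faults = 34 pages.
    static_assert(kPoolBytesRequired == 34 * kPageSize, "layout adds up");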
@@ -258,17 +266,60 @@
   return reinterpret_cast<void *>(UserPtr);
 }
 
-void GuardedPoolAllocator::trapOnAddress(uintptr_t Address, Error E) {
+void GuardedPoolAllocator::raiseInternallyDetectedError(uintptr_t Address,
+                                                        Error E) {
+  // Disable the allocator before setting the internal failure state. In
+  // non-recoverable mode, the allocator will be permanently disabled, and so
+  // things will be accessed without locks.
+  disable();
+
+  // Races between internally- and externally-raised faults can happen. Right
+  // now, in this thread we've locked the allocator in order to raise an
+  // internally-detected fault, and another thread could SIGSEGV to raise an
+  // externally-detected fault. What will happen is that the other thread will
+  // wait in the signal handler, as we hold the allocator's locks from the
+  // disable() above. We'll trigger the signal handler by touching the
+  // internal-signal-raising address below, and the signal handler from our
+  // thread will get to run first, as we will continue to hold the allocator
+  // locks until the enable() at the end of this function. Be careful though:
+  // if this thread receives another SIGSEGV after the disable() above, but
+  // before touching the internal-signal-raising address below, then this
+  // thread will get an "externally-raised" SIGSEGV while *also* holding the
+  // allocator locks, which means this thread's signal handler will deadlock.
+  // This could be resolved with a re-entrant lock, but asking platforms to
+  // implement one seems unnecessary, given that the only way to get a SIGSEGV
+  // in this critical section is either a memory safety bug in the couple of
+  // lines of code below (be careful!), or someone outside using
+  // `kill(this_thread, SIGSEGV)`, which really shouldn't happen.
+
   State.FailureType = E;
   State.FailureAddress = Address;
 
-  // Raise a SEGV by touching first guard page.
-  volatile char *p = reinterpret_cast<volatile char *>(State.GuardedPagePool);
+  // Raise a SEGV by touching a specific address that identifies to the crash
+  // handler that this is an internally-raised fault. Changing this address?
+  // Don't forget to update __gwp_asan_get_internal_crash_address().
+  volatile char *p =
+      reinterpret_cast<volatile char *>(State.internallyDetectedErrorFaultAddress());
   *p = 0;
-  // Normally, would be __builtin_unreachable(), but because of
-  // https://bugs.llvm.org/show_bug.cgi?id=47480, unreachable will DCE the
-  // volatile store above, even though it has side effects.
-  __builtin_trap();
+
+  // This should never be reached in non-recoverable mode. Ensure that the
+  // signal handler called handleRecoverablePostCrashReport(), which was
+  // responsible for resetting these fields.
+  assert(State.FailureType == Error::UNKNOWN);
+  assert(State.FailureAddress == 0u);
+
+  // In recoverable mode, the signal handler (after dumping the crash report)
+  // marked the page containing the internal fault address as readable and
+  // writable, to allow the second touch to succeed after returning from the
+  // signal handler. Now, mark the page as inaccessible again, so future
+  // internal faults can be raised.
+  deallocateInGuardedPool(
+      reinterpret_cast<void *>(getPageAddr(
+          State.internallyDetectedErrorFaultAddress(), State.PageSize)),
+      State.PageSize);
+
+  // Now that we're done patching ourselves back up, re-enable the allocator.
+  enable();
 }
 
 void GuardedPoolAllocator::stop() {
@@ -282,19 +333,25 @@
   size_t Slot = State.getNearestSlot(UPtr);
   uintptr_t SlotStart = State.slotToAddr(Slot);
   AllocationMetadata *Meta = addrToMetadata(UPtr);
+
+  // If this allocation is responsible for a crash, never recycle it. Turn the
+  // deallocate() call into a no-op.
+  if (Meta->HasCrashed)
+    return;
+
   if (Meta->Addr != UPtr) {
-    // If multiple errors occur at the same time, use the first one.
-    ScopedLock L(PoolMutex);
-    trapOnAddress(UPtr, Error::INVALID_FREE);
+    raiseInternallyDetectedError(UPtr, Error::INVALID_FREE);
+    return;
+  }
+  if (Meta->IsDeallocated) {
+    raiseInternallyDetectedError(UPtr, Error::DOUBLE_FREE);
+    return;
   }
 
   // Intentionally scope the mutex here, so that other threads can access the
   // pool during the expensive markInaccessible() call.
   {
     ScopedLock L(PoolMutex);
-    if (Meta->IsDeallocated) {
-      trapOnAddress(UPtr, Error::DOUBLE_FREE);
-    }
 
     // Ensure that the deallocation is recorded before marking the page as
     // inaccessible. Otherwise, a racy use-after-free will have inconsistent
@@ -318,6 +375,61 @@
   freeSlot(Slot);
 }
 
+// Thread-compatible, protected by PoolMutex.
+static bool PreviousRecursiveGuard;
+
+void GuardedPoolAllocator::handleRecoverablePreCrashReport(void *Ptr) {
+  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+  uintptr_t InternalCrashAddr = __gwp_asan_get_internal_crash_address(
+      &State, reinterpret_cast<uintptr_t>(Ptr));
+  if (!InternalCrashAddr)
+    disable();
+
+  // If something in the signal handler calls malloc() while dumping the
+  // GWP-ASan report (e.g. backtrace_symbols()), make sure that GWP-ASan doesn't
+  // service that allocation. `PreviousRecursiveGuard` is protected by the
+  // allocator locks taken in disable(), either explicitly above for
+  // externally-raised errors, or implicitly in raiseInternallyDetectedError()
+  // for internally-detected errors.
+  PreviousRecursiveGuard = getThreadLocals()->RecursiveGuard;
+  getThreadLocals()->RecursiveGuard = true;
+}
+
+void GuardedPoolAllocator::handleRecoverablePostCrashReport(void *SignalPtr) {
+  uintptr_t SignalUPtr = reinterpret_cast<uintptr_t>(SignalPtr);
+  uintptr_t InternalCrashAddr =
+      __gwp_asan_get_internal_crash_address(&State, SignalUPtr);
+  uintptr_t ErrorUptr = InternalCrashAddr ?: SignalUPtr;
+
+  AllocationMetadata *Metadata = addrToMetadata(ErrorUptr);
+  Metadata->HasCrashed = true;
+
+  allocateInGuardedPool(
+      reinterpret_cast<void *>(getPageAddr(SignalUPtr, State.PageSize)),
+      State.PageSize);
+
+  // Clear the internal state in order to not confuse the crash handler if a
+  // use-after-free or buffer-overflow comes from a different allocation in the
+  // future.
+  if (InternalCrashAddr) {
+    State.FailureType = Error::UNKNOWN;
+    State.FailureAddress = 0;
+  }
+
+  size_t Slot = State.getNearestSlot(ErrorUptr);
+  // If the slot is available, remove it permanently.
+  for (size_t i = 0; i < FreeSlotsLength; ++i) {
+    if (FreeSlots[i] == Slot) {
+      FreeSlots[i] = FreeSlots[FreeSlotsLength - 1];
+      FreeSlotsLength -= 1;
+      break;
+    }
+  }
+
+  getThreadLocals()->RecursiveGuard = PreviousRecursiveGuard;
+  if (!InternalCrashAddr)
+    enable();
+}
+
 size_t GuardedPoolAllocator::getSize(const void *Ptr) {
   assert(pointerIsMine(Ptr));
   ScopedLock L(PoolMutex);
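For embedders that ship their own SEGV handler instead of the optional POSIX one changed below, the contract for the two hooks distills to roughly the following sketch; HandlePossibleGwpAsanFault and the commented-out report printer are placeholders, not part of GWP-ASan.

    #include "gwp_asan/crash_handler.h"
    #include "gwp_asan/guarded_pool_allocator.h"

    #include <stdint.h>

    void HandlePossibleGwpAsanFault(gwp_asan::GuardedPoolAllocator &GPA,
                                    void *FaultAddr) {
      const gwp_asan::AllocatorState *State = GPA.getAllocatorState();
      uintptr_t FaultUPtr = reinterpret_cast<uintptr_t>(FaultAddr);
      if (!__gwp_asan_error_is_mine(State, FaultUPtr))
        return; // Let the previously-installed handler run.

      // Takes the allocator locks (for externally-detected faults) and sets the
      // recursion guard so allocations made while printing aren't sampled.
      GPA.handleRecoverablePreCrashReport(FaultAddr);

      // PrintMyReport(State, GPA.getMetadataRegion(), FaultUPtr); // Placeholder.

      // Marks the faulting page accessible so the trapping instruction can be
      // re-executed, clears the internal failure state if needed, retires the
      // slot, and re-enables the allocator.
      GPA.handleRecoverablePostCrashReport(FaultAddr);
    }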
diff --git a/compiler-rt/lib/gwp_asan/optional/segv_handler.h b/compiler-rt/lib/gwp_asan/optional/segv_handler.h
--- a/compiler-rt/lib/gwp_asan/optional/segv_handler.h
+++ b/compiler-rt/lib/gwp_asan/optional/segv_handler.h
@@ -23,7 +23,8 @@
 // before this function.
 void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
                            gwp_asan::backtrace::PrintBacktrace_t PrintBacktrace,
-                           gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace);
+                           gwp_asan::backtrace::SegvBacktrace_t SegvBacktrace,
+                           bool Recoverable = false);
 
 // Uninstall the signal handlers, test-only.
 void uninstallSignalHandlers();
diff --git a/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp b/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
--- a/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
+++ b/compiler-rt/lib/gwp_asan/optional/segv_handler_fuchsia.cpp
@@ -15,7 +15,8 @@
 void installSignalHandlers(gwp_asan::GuardedPoolAllocator * /* GPA */,
                            Printf_t /* Printf */,
                            backtrace::PrintBacktrace_t /* PrintBacktrace */,
-                           backtrace::SegvBacktrace_t /* SegvBacktrace */) {}
+                           backtrace::SegvBacktrace_t /* SegvBacktrace */,
+                           bool /* Recoverable */) {}
 
 void uninstallSignalHandlers() {}
 } // namespace segv_handler
diff --git a/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp b/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
--- a/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
+++ b/compiler-rt/lib/gwp_asan/optional/segv_handler_posix.cpp
@@ -106,19 +106,31 @@
   assert(State && "dumpReport missing Allocator State.");
   assert(Metadata && "dumpReport missing Metadata.");
   assert(Printf && "dumpReport missing Printf.");
+  assert(__gwp_asan_error_is_mine(State, ErrorPtr) &&
+         "dumpReport() called on a non-GWP-ASan error.");
 
-  if (!__gwp_asan_error_is_mine(State, ErrorPtr))
+  uintptr_t InternalErrorPtr =
+      __gwp_asan_get_internal_crash_address(State, ErrorPtr);
+  if (InternalErrorPtr)
+    ErrorPtr = InternalErrorPtr;
+
+  const gwp_asan::AllocationMetadata *AllocMeta =
+      __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
+
+  // It's unusual for a signal handler to be invoked multiple times for the same
+  // allocation, but it's possible in various scenarios, like:
+  // 1. A double-free or invalid-free was invoked in one thread at the same
+  //    time as a buffer-overflow or use-after-free in another thread, or
+  // 2. Two threads do a use-after-free or buffer-overflow at the same time.
+  // In these instances, we've already dumped a report for this allocation, so
+  // skip dumping this issue as well.
+  if (AllocMeta->HasCrashed)
     return;
 
   Printf("*** GWP-ASan detected a memory error ***\n");
   ScopedEndOfReportDecorator Decorator(Printf);
 
-  uintptr_t InternalErrorPtr = __gwp_asan_get_internal_crash_address(State);
-  if (InternalErrorPtr != 0u)
-    ErrorPtr = InternalErrorPtr;
-
   Error E = __gwp_asan_diagnose_error(State, Metadata, ErrorPtr);
-
   if (E == Error::UNKNOWN) {
     Printf("GWP-ASan cannot provide any more information about this error. "
            "This may occur due to a wild memory access into the GWP-ASan pool, "
@@ -126,9 +138,6 @@
     return;
   }
 
-  const gwp_asan::AllocationMetadata *AllocMeta =
-      __gwp_asan_get_metadata(State, Metadata, ErrorPtr);
-
   // Print the error header.
   printHeader(E, ErrorPtr, AllocMeta, Printf);
 
@@ -168,23 +177,35 @@
 
 struct sigaction PreviousHandler;
 bool SignalHandlerInstalled;
+bool RecoverableSignal;
 gwp_asan::GuardedPoolAllocator *GPAForSignalHandler;
 Printf_t PrintfForSignalHandler;
 PrintBacktrace_t PrintBacktraceForSignalHandler;
 SegvBacktrace_t BacktraceForSignalHandler;
 
 static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
-  if (GPAForSignalHandler) {
-    GPAForSignalHandler->stop();
+  const gwp_asan::AllocatorState *State =
+      GPAForSignalHandler->getAllocatorState();
+  void *FaultAddr = info->si_addr;
+  uintptr_t FaultAddrUPtr = reinterpret_cast<uintptr_t>(FaultAddr);
 
-    dumpReport(reinterpret_cast<uintptr_t>(info->si_addr),
-               GPAForSignalHandler->getAllocatorState(),
-               GPAForSignalHandler->getMetadataRegion(),
+  if (__gwp_asan_error_is_mine(State, FaultAddrUPtr)) {
+    if (RecoverableSignal)
+      GPAForSignalHandler->handleRecoverablePreCrashReport(FaultAddr);
+    else
+      GPAForSignalHandler->stop();
+
+    dumpReport(FaultAddrUPtr, State, GPAForSignalHandler->getMetadataRegion(),
                BacktraceForSignalHandler, PrintfForSignalHandler,
                PrintBacktraceForSignalHandler, ucontext);
+
+    if (RecoverableSignal) {
+      GPAForSignalHandler->handleRecoverablePostCrashReport(FaultAddr);
+      return;
+    }
   }
 
-  // Process any previous handlers.
+  // Process any previous handlers as long as the crash wasn't a GWP-ASan crash
+  // in recoverable mode.
   if (PreviousHandler.sa_flags & SA_SIGINFO) {
     PreviousHandler.sa_sigaction(sig, info, ucontext);
   } else if (PreviousHandler.sa_handler == SIG_DFL) {
@@ -210,7 +231,7 @@
 void installSignalHandlers(gwp_asan::GuardedPoolAllocator *GPA, Printf_t Printf,
                            PrintBacktrace_t PrintBacktrace,
-                           SegvBacktrace_t SegvBacktrace) {
+                           SegvBacktrace_t SegvBacktrace, bool Recoverable) {
   assert(GPA && "GPA wasn't provided to installSignalHandlers.");
   assert(Printf && "Printf wasn't provided to installSignalHandlers.");
   assert(PrintBacktrace &&
@@ -221,6 +242,7 @@
   PrintfForSignalHandler = Printf;
   PrintBacktraceForSignalHandler = PrintBacktrace;
   BacktraceForSignalHandler = SegvBacktrace;
+  RecoverableSignal = Recoverable;
 
   struct sigaction Action = {};
   Action.sa_sigaction = sigSegvHandler;
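A sketch of what opting in might look like for an embedder that uses the optional handler directly; it assumes the usual optional/backtrace.h helpers and that Options::setDefaults() fills the remaining fields (compare the Scudo wiring at the end of this patch, which drives the same parameter from the GWP_ASAN_Recoverable flag).

    #include "gwp_asan/guarded_pool_allocator.h"
    #include "gwp_asan/optional/backtrace.h"
    #include "gwp_asan/optional/segv_handler.h"
    #include "gwp_asan/options.h"

    void InitGwpAsanRecoverable(gwp_asan::GuardedPoolAllocator &GPA,
                                gwp_asan::Printf_t Printf) {
      gwp_asan::options::Options Opts;
      Opts.setDefaults();
      Opts.Recoverable = true; // The new option declared in options.inc below.
      GPA.init(Opts);

      gwp_asan::segv_handler::installSignalHandlers(
          &GPA, Printf, gwp_asan::backtrace::getPrintBacktraceFunction(),
          gwp_asan::backtrace::getSegvBacktraceFunction(),
          /* Recoverable */ Opts.Recoverable);
    }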
The recoverable mode also means that " + "previously-installed signal handlers will only be triggered for " + "non-GWP-ASan errors, as all GWP-ASan errors won't be forwarded.") + GWP_ASAN_OPTION(bool, InstallForkHandlers, true, "Install GWP-ASan atfork handlers to acquire internal locks " "before fork and release them after.") diff --git a/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt b/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt --- a/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt +++ b/compiler-rt/lib/gwp_asan/tests/CMakeLists.txt @@ -26,7 +26,8 @@ harness.cpp enable_disable.cpp late_init.cpp - options.cpp) + options.cpp + recoverable.cpp) set(GWP_ASAN_UNIT_TEST_HEADERS ${GWP_ASAN_HEADERS} diff --git a/compiler-rt/lib/gwp_asan/tests/backtrace.cpp b/compiler-rt/lib/gwp_asan/tests/backtrace.cpp --- a/compiler-rt/lib/gwp_asan/tests/backtrace.cpp +++ b/compiler-rt/lib/gwp_asan/tests/backtrace.cpp @@ -6,6 +6,7 @@ // //===----------------------------------------------------------------------===// +#include #include #include "gwp_asan/common.h" @@ -14,38 +15,44 @@ // Optnone to ensure that the calls to these functions are not optimized away, // as we're looking for them in the backtraces. -__attribute((optnone)) void * +__attribute__((optnone)) static void * AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) { return GPA.allocate(1); } -__attribute((optnone)) void +__attribute__((optnone)) static void DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) { GPA.deallocate(Ptr); } -__attribute((optnone)) void +__attribute__((optnone)) static void DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) { GPA.deallocate(Ptr); } -__attribute__((optnone)) void TouchMemory(void *Ptr) { +__attribute__((optnone)) static void TouchMemory(void *Ptr) { *(reinterpret_cast(Ptr)) = 7; } -TEST_F(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) { +TEST_P(BacktraceGuardedPoolAllocatorDeathTest, DoubleFree) { void *Ptr = AllocateMemory(GPA); DeallocateMemory(GPA, Ptr); - std::string DeathRegex = "Double Free.*"; - DeathRegex.append("DeallocateMemory2.*"); - - DeathRegex.append("was deallocated.*"); - DeathRegex.append("DeallocateMemory.*"); - - DeathRegex.append("was allocated.*"); - DeathRegex.append("AllocateMemory.*"); - ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex); + std::string DeathRegex = "Double Free.*DeallocateMemory2.*"; + DeathRegex.append("was deallocated.*DeallocateMemory[^2].*"); + DeathRegex.append("was allocated.*AllocateMemory"); + if (!Recoverable) { + ASSERT_DEATH(DeallocateMemory2(GPA, Ptr), DeathRegex); + return; + } + + // For recoverable, assert that DeallocateMemory2() doesn't crash. + DeallocateMemory2(GPA, Ptr); + // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...), + // so check the regex manually. 
+  EXPECT_TRUE(std::regex_search(
+      GetOutputBuffer(),
+      std::basic_regex<char>(DeathRegex, std::regex_constants::extended)));
 }
 
-TEST_F(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
+TEST_P(BacktraceGuardedPoolAllocatorDeathTest, UseAfterFree) {
 #if defined(__linux__) && __ARM_ARCH == 7
   // Incomplete backtrace on Armv7 Linux
   GTEST_SKIP();
 #endif
 
@@ -54,17 +61,28 @@
   void *Ptr = AllocateMemory(GPA);
   DeallocateMemory(GPA, Ptr);
 
-  std::string DeathRegex = "Use After Free.*";
-  DeathRegex.append("TouchMemory.*");
-
-  DeathRegex.append("was deallocated.*");
-  DeathRegex.append("DeallocateMemory.*");
-
-  DeathRegex.append("was allocated.*");
-  DeathRegex.append("AllocateMemory.*");
-  ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+  std::string DeathRegex = "Use After Free.*TouchMemory.*";
+  DeathRegex.append("was deallocated.*DeallocateMemory[^2].*");
+  DeathRegex.append("was allocated.*AllocateMemory");
+
+  if (!Recoverable) {
+    ASSERT_DEATH(TouchMemory(Ptr), DeathRegex);
+    return;
+  }
+
+  // For recoverable, assert that TouchMemory() doesn't crash.
+  TouchMemory(Ptr);
+  // Fuchsia's zxtest doesn't have an EXPECT_THAT(testing::MatchesRegex(), ...),
+  // so check the regex manually.
+  EXPECT_TRUE(std::regex_search(
+      GetOutputBuffer(),
+      std::basic_regex<char>(DeathRegex, std::regex_constants::extended)));
 }
 
+INSTANTIATE_TEST_SUITE_P(RecoverableSignalDeathTest,
+                         BacktraceGuardedPoolAllocatorDeathTest,
+                         /* Recoverable */ testing::Bool());
+
 TEST(Backtrace, Short) {
   gwp_asan::AllocationMetadata Meta;
   Meta.AllocationTrace.RecordBacktrace(
diff --git a/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp b/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
--- a/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
+++ b/compiler-rt/lib/gwp_asan/tests/crash_handler_api.cpp
@@ -40,7 +40,8 @@
   void setupState() {
     State.GuardedPagePool = 0x2000;
-    State.GuardedPagePoolEnd = 0xb000;
+    State.GuardedPagePoolEnd = 0xc000;
+    InternalFaultAddr = State.GuardedPagePoolEnd - 0x10;
     State.MaxSimultaneousAllocations = 4; // 0x3000, 0x5000, 0x7000, 0x9000.
     State.PageSize = 0x1000;
   }
@@ -100,6 +101,7 @@
   static uintptr_t BacktraceConstants[kNumBacktraceConstants];
   AllocatorState State = {};
   AllocationMetadata Metadata[4] = {};
+  uintptr_t InternalFaultAddr;
 };
 
 uintptr_t CrashHandlerAPITest::BacktraceConstants[kNumBacktraceConstants] = {
@@ -125,7 +127,7 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
   EXPECT_EQ(Error::UNKNOWN,
             __gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
-  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
   EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
 }
 
@@ -140,7 +142,8 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
   EXPECT_EQ(Error::DOUBLE_FREE,
             __gwp_asan_diagnose_error(&State, Metadata, 0x0));
-  EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(FailureAddress,
+            __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
   checkMetadata(Index, FailureAddress);
 }
 
@@ -155,7 +158,8 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
   EXPECT_EQ(Error::INVALID_FREE,
             __gwp_asan_diagnose_error(&State, Metadata, 0x0));
-  EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(FailureAddress,
+            __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
   checkMetadata(Index, FailureAddress);
 }
 
@@ -168,7 +172,8 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State));
   EXPECT_EQ(Error::INVALID_FREE,
             __gwp_asan_diagnose_error(&State, Metadata, 0x0));
-  EXPECT_EQ(FailureAddress, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(FailureAddress,
+            __gwp_asan_get_internal_crash_address(&State, InternalFaultAddr));
   EXPECT_EQ(nullptr, __gwp_asan_get_metadata(&State, Metadata, FailureAddress));
 }
 
@@ -180,7 +185,7 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
   EXPECT_EQ(Error::USE_AFTER_FREE,
             __gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
-  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
   checkMetadata(Index, FailureAddress);
 }
 
@@ -192,7 +197,7 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
   EXPECT_EQ(Error::BUFFER_OVERFLOW,
             __gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
-  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
   checkMetadata(Index, FailureAddress);
 }
 
@@ -204,6 +209,6 @@
   EXPECT_TRUE(__gwp_asan_error_is_mine(&State, FailureAddress));
   EXPECT_EQ(Error::BUFFER_UNDERFLOW,
             __gwp_asan_diagnose_error(&State, Metadata, FailureAddress));
-  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State));
+  EXPECT_EQ(0u, __gwp_asan_get_internal_crash_address(&State, FailureAddress));
   checkMetadata(Index, FailureAddress);
 }
diff --git a/compiler-rt/lib/gwp_asan/tests/harness.h b/compiler-rt/lib/gwp_asan/tests/harness.h
--- a/compiler-rt/lib/gwp_asan/tests/harness.h
+++ b/compiler-rt/lib/gwp_asan/tests/harness.h
@@ -81,7 +81,8 @@
       MaxSimultaneousAllocations;
 };
 
-class BacktraceGuardedPoolAllocator : public Test {
+class BacktraceGuardedPoolAllocator
+    : public testing::TestWithParam<bool> {
 public:
   void SetUp() override {
     gwp_asan::options::Options Opts;
@@ -91,10 +92,19 @@
     Opts.InstallForkHandlers = gwp_asan::test::OnlyOnce();
     GPA.init(Opts);
 
+    // In recoverable mode, capture GWP-ASan logs to an internal buffer so that
+    // we can search it in unit tests. For non-recoverable tests, the default
+    // buffer is fine, as any tests should be EXPECT_DEATH()'d.
+    Recoverable = GetParam();
+    gwp_asan::Printf_t PrintfFunction = PrintfToBuffer;
+    GetOutputBuffer().clear();
+    if (!Recoverable)
+      PrintfFunction = gwp_asan::test::getPrintfFunction();
+
     gwp_asan::segv_handler::installSignalHandlers(
-        &GPA, gwp_asan::test::getPrintfFunction(),
-        gwp_asan::backtrace::getPrintBacktraceFunction(),
-        gwp_asan::backtrace::getSegvBacktraceFunction());
+        &GPA, PrintfFunction, gwp_asan::backtrace::getPrintBacktraceFunction(),
+        gwp_asan::backtrace::getSegvBacktraceFunction(),
+        /* Recoverable */ Recoverable);
   }
 
   void TearDown() override {
@@ -103,7 +113,23 @@
   }
 
 protected:
+  static std::string &GetOutputBuffer() {
+    static std::string Buffer;
+    return Buffer;
+  }
+
+  __attribute__((format(printf, 1, 2))) static void
+  PrintfToBuffer(const char *Format, ...) {
+    va_list AP;
+    va_start(AP, Format);
+    char Buffer[8192];
+    vsnprintf(Buffer, sizeof(Buffer), Format, AP);
+    GetOutputBuffer() += Buffer;
+    va_end(AP);
+  }
+
   gwp_asan::GuardedPoolAllocator GPA;
+  bool Recoverable;
 };
 
 // https://github.com/google/googletest/blob/master/docs/advanced.md#death-tests-and-threads
diff --git a/compiler-rt/lib/gwp_asan/tests/recoverable.cpp b/compiler-rt/lib/gwp_asan/tests/recoverable.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/gwp_asan/tests/recoverable.cpp
@@ -0,0 +1,193 @@
+//===-- recoverable.cpp -----------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include <atomic>
+#include <mutex>
+#include <regex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "gwp_asan/common.h"
+#include "gwp_asan/crash_handler.h"
+#include "gwp_asan/tests/harness.h"
+
+// Optnone to ensure that the calls to these functions are not optimized away,
+// as we're looking for them in the backtraces.
+__attribute__((optnone)) static char *
+AllocateMemory(gwp_asan::GuardedPoolAllocator &GPA) {
+  return static_cast<char *>(GPA.allocate(1));
+}
+__attribute__((optnone)) static void
+DeallocateMemory(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+  GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) static void
+DeallocateMemory2(gwp_asan::GuardedPoolAllocator &GPA, void *Ptr) {
+  GPA.deallocate(Ptr);
+}
+__attribute__((optnone)) static void TouchMemory(void *Ptr) {
+  *(reinterpret_cast<char *>(Ptr)) = 7;
+}
+
+void CheckOnlyOneGwpAsanCrash(const std::string &OutputBuffer) {
+  const char *kGwpAsanErrorString = "GWP-ASan detected a memory error";
+  size_t FirstIndex = OutputBuffer.find(kGwpAsanErrorString);
+  ASSERT_NE(FirstIndex, std::string::npos) << "Didn't detect a GWP-ASan crash";
+  ASSERT_EQ(OutputBuffer.find(kGwpAsanErrorString, FirstIndex + 1),
+            std::string::npos)
+      << "Detected more than one GWP-ASan crash:\n"
+      << OutputBuffer;
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleDoubleFreeOnlyOneOutput) {
+  SCOPED_TRACE("");
+  void *Ptr = AllocateMemory(GPA);
+  DeallocateMemory(GPA, Ptr);
+  // First time should generate a crash report.
+  DeallocateMemory(GPA, Ptr);
+  CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+  ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+  // Ensure the crash is only reported once.
+  GetOutputBuffer().clear();
+  for (size_t i = 0; i < 100; ++i) {
+    DeallocateMemory(GPA, Ptr);
+    ASSERT_TRUE(GetOutputBuffer().empty());
+  }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleInvalidFreeOnlyOneOutput) {
+  SCOPED_TRACE("");
+  char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+  // First time should generate a crash report.
+  DeallocateMemory(GPA, Ptr + 1);
+  CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+  ASSERT_NE(std::string::npos, GetOutputBuffer().find("Invalid (Wild) Free"));
+
+  // Ensure the crash is only reported once.
+  GetOutputBuffer().clear();
+  for (size_t i = 0; i < 100; ++i) {
+    DeallocateMemory(GPA, Ptr + 1);
+    ASSERT_TRUE(GetOutputBuffer().empty());
+  }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleUseAfterFreeOnlyOneOutput) {
+  SCOPED_TRACE("");
+  void *Ptr = AllocateMemory(GPA);
+  DeallocateMemory(GPA, Ptr);
+  // First time should generate a crash report.
+  TouchMemory(Ptr);
+  ASSERT_NE(std::string::npos, GetOutputBuffer().find("Use After Free"));
+
+  // Ensure the crash is only reported once.
+  GetOutputBuffer().clear();
+  for (size_t i = 0; i < 100; ++i) {
+    TouchMemory(Ptr);
+    ASSERT_TRUE(GetOutputBuffer().empty());
+  }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, MultipleBufferOverflowOnlyOneOutput) {
+  SCOPED_TRACE("");
+  char *Ptr = static_cast<char *>(AllocateMemory(GPA));
+  // First time should generate a crash report.
+  TouchMemory(Ptr - 16);
+  TouchMemory(Ptr + 16);
+  CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+  if (GetOutputBuffer().find("Buffer Overflow") == std::string::npos &&
+      GetOutputBuffer().find("Buffer Underflow") == std::string::npos)
+    FAIL() << "Failed to detect buffer underflow/overflow:\n"
+           << GetOutputBuffer();
+
+  // Ensure the crash is only reported once.
+  GetOutputBuffer().clear();
+  for (size_t i = 0; i < 100; ++i) {
+    TouchMemory(Ptr - 16);
+    TouchMemory(Ptr + 16);
+    ASSERT_TRUE(GetOutputBuffer().empty()) << GetOutputBuffer();
+  }
+}
+
+TEST_P(BacktraceGuardedPoolAllocator, OneDoubleFreeOneUseAfterFree) {
+  SCOPED_TRACE("");
+  void *Ptr = AllocateMemory(GPA);
+  DeallocateMemory(GPA, Ptr);
+  // First time should generate a crash report.
+  DeallocateMemory(GPA, Ptr);
+  CheckOnlyOneGwpAsanCrash(GetOutputBuffer());
+  ASSERT_NE(std::string::npos, GetOutputBuffer().find("Double Free"));
+
+  // Ensure the crash is only reported once.
+  GetOutputBuffer().clear();
+  for (size_t i = 0; i < 100; ++i) {
+    DeallocateMemory(GPA, Ptr);
+    ASSERT_TRUE(GetOutputBuffer().empty());
+  }
+}
+
+void singleAllocThrashTask(gwp_asan::GuardedPoolAllocator *GPA,
+                           std::atomic<bool> *StartingGun,
+                           unsigned NumIterations, unsigned Job, char *Ptr) {
+  while (!*StartingGun) {
+    // Wait for starting gun.
+ } + + for (unsigned i = 0; i < NumIterations; ++i) { + switch (Job) { + case 0: + DeallocateMemory(*GPA, Ptr); + break; + case 1: + DeallocateMemory(*GPA, Ptr + 1); + break; + case 2: + TouchMemory(Ptr); + break; + case 3: + TouchMemory(Ptr - 16); + TouchMemory(Ptr + 16); + break; + default: + __builtin_trap(); + } + } +} + +void runInterThreadThrashingSingleAlloc(unsigned NumIterations, + gwp_asan::GuardedPoolAllocator *GPA) { + std::atomic StartingGun{false}; + std::vector Threads; + constexpr unsigned kNumThreads = 4; + if (std::thread::hardware_concurrency() < kNumThreads) { + GTEST_SKIP() << "Not enough threads to run this test"; + } + + char *Ptr = static_cast(AllocateMemory(*GPA)); + + for (unsigned i = 0; i < kNumThreads; ++i) { + Threads.emplace_back(singleAllocThrashTask, GPA, &StartingGun, + NumIterations, i, Ptr); + } + + StartingGun = true; + + for (auto &T : Threads) + T.join(); +} + +TEST_P(BacktraceGuardedPoolAllocator, InterThreadThrashingSingleAlloc) { + SCOPED_TRACE(""); + constexpr unsigned kNumIterations = 100000; + runInterThreadThrashingSingleAlloc(kNumIterations, &GPA); + CheckOnlyOneGwpAsanCrash(GetOutputBuffer()); +} + +INSTANTIATE_TEST_SUITE_P(RecoverableTests, BacktraceGuardedPoolAllocator, + /* Recoverable */ testing::Values(true)); diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h --- a/compiler-rt/lib/scudo/standalone/combined.h +++ b/compiler-rt/lib/scudo/standalone/combined.h @@ -185,6 +185,7 @@ getFlags()->GWP_ASAN_MaxSimultaneousAllocations; Opt.SampleRate = getFlags()->GWP_ASAN_SampleRate; Opt.InstallSignalHandlers = getFlags()->GWP_ASAN_InstallSignalHandlers; + Opt.Recoverable = getFlags()->GWP_ASAN_Recoverable; // Embedded GWP-ASan is locked through the Scudo atfork handler (via // Allocator::disable calling GWPASan.disable). Disable GWP-ASan's atfork // handler. @@ -196,7 +197,8 @@ gwp_asan::segv_handler::installSignalHandlers( &GuardedAlloc, Printf, gwp_asan::backtrace::getPrintBacktraceFunction(), - gwp_asan::backtrace::getSegvBacktraceFunction()); + gwp_asan::backtrace::getSegvBacktraceFunction(), + Opt.Recoverable); GuardedAllocSlotSize = GuardedAlloc.getAllocatorState()->maximumAllocationSize();