Index: compiler-rt/trunk/lib/gwp_asan/CMakeLists.txt
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/CMakeLists.txt
+++ compiler-rt/trunk/lib/gwp_asan/CMakeLists.txt
@@ -3,15 +3,19 @@
 include_directories(..)
 
 set(GWP_ASAN_SOURCES
+  platform_specific/guarded_pool_allocator_posix.cpp
   platform_specific/mutex_posix.cpp
+  guarded_pool_allocator.cpp
   random.cpp
 )
 
 set(GWP_ASAN_HEADERS
+  definitions.h
+  guarded_pool_allocator.h
   mutex.h
-  random.h
   options.h
   options.inc
+  random.h
 )
 
 # Ensure that GWP-ASan meets the delegated requirements of some supporting
Index: compiler-rt/trunk/lib/gwp_asan/definitions.h
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/definitions.h
+++ compiler-rt/trunk/lib/gwp_asan/definitions.h
@@ -0,0 +1,29 @@
+//===-- definitions.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_DEFINITIONS_H_
+#define GWP_ASAN_DEFINITIONS_H_
+
+#define TLS_INITIAL_EXEC __thread __attribute__((tls_model("initial-exec")))
+
+#ifdef LIKELY
+# undef LIKELY
+#endif // defined(LIKELY)
+#define LIKELY(X) __builtin_expect(!!(X), 1)
+
+#ifdef UNLIKELY
+# undef UNLIKELY
+#endif // defined(UNLIKELY)
+#define UNLIKELY(X) __builtin_expect(!!(X), 0)
+
+#ifdef ALWAYS_INLINE
+# undef ALWAYS_INLINE
+#endif // defined(ALWAYS_INLINE)
+#define ALWAYS_INLINE inline __attribute__((always_inline))
+
+#endif // GWP_ASAN_DEFINITIONS_H_
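[Note: the block below is an illustrative sketch, not part of the patch. It
shows how the definitions.h macros above are typically used; mightFail() is a
made-up function.]

#include "gwp_asan/definitions.h"

// UNLIKELY() steers the compiler's block layout so the cold path is moved out
// of line; ALWAYS_INLINE forces inlining wherever the compiler can honour it.
ALWAYS_INLINE bool mightFail(int Fd) {
  if (UNLIKELY(Fd < 0))
    return false; // Cold error path, laid out out-of-line.
  return true;    // Hot path falls through.
}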
Index: compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.h
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.h
+++ compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.h
@@ -0,0 +1,254 @@
+//===-- guarded_pool_allocator.h --------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
+#define GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
+
+#include "gwp_asan/definitions.h"
+#include "gwp_asan/mutex.h"
+#include "gwp_asan/options.h"
+#include "gwp_asan/random.h"
+
+#include <stddef.h>
+#include <stdint.h>
+
+namespace gwp_asan {
+// This class is the primary implementation of the allocator portion of
+// GWP-ASan. It is the sole owner of the pool of sequentially allocated guarded
+// slots. It should always be treated as a singleton.
+//
+// Functions in the public interface of this class are thread-compatible until
+// init() is called, at which point they become thread-safe (unless specified
+// otherwise).
+class GuardedPoolAllocator {
+public:
+  static constexpr uint64_t kInvalidThreadID = UINT64_MAX;
+
+  enum class Error {
+    UNKNOWN,
+    USE_AFTER_FREE,
+    DOUBLE_FREE,
+    INVALID_FREE,
+    BUFFER_OVERFLOW,
+    BUFFER_UNDERFLOW
+  };
+
+  struct AllocationMetadata {
+    // Maximum number of stack trace frames to collect for allocations and
+    // frees. TODO(hctim): Implement stack frame compression, a la Chromium.
+    // Currently the maximum is one frame, as we don't collect traces yet.
+    static constexpr size_t kMaximumStackFrames = 1;
+
+    // Records the given allocation metadata into this struct. In the future,
+    // this will also collect the allocation trace.
+    void RecordAllocation(uintptr_t Addr, size_t Size);
+
+    // Records that this allocation is now deallocated. In the future, this
+    // will also collect the deallocation trace.
+    void RecordDeallocation();
+
+    struct CallSiteInfo {
+      // The backtrace to the allocation/deallocation. If the first value is
+      // zero, we did not collect a trace.
+      uintptr_t Trace[kMaximumStackFrames] = {};
+      // The thread ID for this trace, or kInvalidThreadID if not available.
+      uint64_t ThreadID = kInvalidThreadID;
+    };
+
+    // The address of this allocation.
+    uintptr_t Addr = 0;
+    // The actual size of the allocation.
+    size_t Size = 0;
+
+    CallSiteInfo AllocationTrace;
+    CallSiteInfo DeallocationTrace;
+
+    // Whether this allocation has been deallocated yet.
+    bool IsDeallocated = false;
+  };
+
+  // During program startup, we must ensure that memory allocations do not land
+  // in this allocation pool if the allocator decides to runtime-disable
+  // GWP-ASan. The constructor value-initialises the class such that if no
+  // further initialisation takes place, calls to shouldSample() and
+  // pointerIsMine() will return false.
+  constexpr GuardedPoolAllocator() {}
+  GuardedPoolAllocator(const GuardedPoolAllocator &) = delete;
+  GuardedPoolAllocator &operator=(const GuardedPoolAllocator &) = delete;
+
+  // Note: This class is expected to be a singleton for the lifetime of the
+  // program. If this object is initialised, it will leak the guarded page pool
+  // and metadata allocations during destruction. We can't clean up these areas
+  // as doing so may cause a use-after-free on shutdown.
+  ~GuardedPoolAllocator() = default;
+
+  // Initialise the rest of the members of this class. Create the allocation
+  // pool using the provided options. See options.inc for runtime configuration
+  // options.
+  void init(const options::Options &Opts);
+
+  // Return whether this allocation should be randomly chosen for sampling.
+  ALWAYS_INLINE bool shouldSample() {
+    // NextSampleCounter == 0 means we "should regenerate the counter".
+    //                   == 1 means we "should sample this allocation".
+    if (UNLIKELY(NextSampleCounter == 0)) {
+      // GuardedPagePoolEnd == 0 if GWP-ASan is disabled.
+      if (UNLIKELY(GuardedPagePoolEnd == 0))
+        return false;
+      NextSampleCounter = (getRandomUnsigned32() % AdjustedSampleRate) + 1;
+    }
+
+    return UNLIKELY(--NextSampleCounter == 0);
+  }
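[Note: illustrative sketch, not part of the patch. The sampling arithmetic
above is worth spelling out: the counter is drawn uniformly from
[1, AdjustedSampleRate], and init() sets AdjustedSampleRate = 2 * SampleRate,
so the counter's mean is about SampleRate. On average, one in every
~SampleRate allocations is sampled.]

#include <cstdint>
#include <random>

// Mirrors shouldSample()'s counter regeneration. A uniform draw on [1, 2N]
// has mean N + 0.5, i.e. roughly a 1-in-N chance per allocation. init()
// guarantees SampleRate <= INT32_MAX, so the multiplication cannot overflow.
uint64_t nextSampleGap(uint32_t SampleRate, std::mt19937 &Rng) {
  uint32_t AdjustedSampleRate = SampleRate * 2;
  return (Rng() % AdjustedSampleRate) + 1;
}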
+
+  // Returns whether the provided pointer is a current sampled allocation that
+  // is owned by this pool.
+  ALWAYS_INLINE bool pointerIsMine(const void *Ptr) const {
+    uintptr_t P = reinterpret_cast<uintptr_t>(Ptr);
+    return GuardedPagePool <= P && P < GuardedPagePoolEnd;
+  }
+
+  // Allocate memory in a guarded slot, and return a pointer to the new
+  // allocation. Returns nullptr if the pool is empty, the requested size is
+  // too large for this pool to handle, or the requested size is zero.
+  void *allocate(size_t Size);
+
+  // Deallocate memory in a guarded slot. The provided pointer must have been
+  // allocated using this pool. This will set the guarded slot as inaccessible.
+  void deallocate(void *Ptr);
+
+  // Returns the size of the allocation at Ptr.
+  size_t getSize(const void *Ptr);
+
+  // Returns the largest allocation that is supported by this pool. Any
+  // allocations larger than this should go to the regular system allocator.
+  size_t maximumAllocationSize() const;
+
+  // Dumps an error report (including allocation and deallocation stack
+  // traces). An optional error may be provided if the caller knows what the
+  // error is ahead of time. This is primarily a helper function that locates
+  // the static singleton pointer and calls the internal version of this
+  // function. This method is never thread-safe, and should only be called
+  // when fatal errors occur.
+  static void reportError(uintptr_t AccessPtr, Error Error = Error::UNKNOWN);
+
+private:
+  static constexpr size_t kInvalidSlotID = SIZE_MAX;
+
+  // These functions anonymously map memory or change the permissions of
+  // mapped memory into this process in a platform-specific way. Pointer and
+  // size arguments are expected to be page-aligned. These functions will
+  // never return on error, instead electing to kill the calling process on
+  // failure. Note that memory is initially mapped inaccessible. To obtain RW
+  // mappings, call mapMemory() followed by markReadWrite() on the returned
+  // pointer.
+  void *mapMemory(size_t Size) const;
+  void markReadWrite(void *Ptr, size_t Size) const;
+  void markInaccessible(void *Ptr, size_t Size) const;
+
+  // Get the current thread ID, or kInvalidThreadID on failure. Note: This
+  // implementation is platform-specific.
+  static uint64_t getThreadID();
+
+  // Get the page size from the platform-specific implementation. Only needs
+  // to be called once; the result should be cached in PageSize in this class.
+  static size_t getPlatformPageSize();
+
+  // Install the SIGSEGV crash handler for printing use-after-free and heap-
+  // buffer-{under|over}flow exceptions. This is platform-specific: even
+  // though POSIX and Windows both support registering handlers through
+  // signal(), we have to use platform-specific signal handlers to obtain the
+  // address that caused the SIGSEGV exception.
+  static void installSignalHandlers();
+
+  // Returns the index of the slot that this pointer resides in. If the
+  // pointer is not owned by this pool, the result is undefined.
+  size_t addrToSlot(uintptr_t Ptr) const;
+
+  // Returns the address of the N-th guarded slot.
+  uintptr_t slotToAddr(size_t N) const;
+
+  // Returns a pointer to the metadata for the owned pointer. If the pointer
+  // is not owned by this pool, the result is undefined.
+  AllocationMetadata *addrToMetadata(uintptr_t Ptr) const;
+
+  // Returns the address of the page that this pointer resides in.
+  uintptr_t getPageAddr(uintptr_t Ptr) const;
+
+  // Gets the nearest slot to the provided address.
+  size_t getNearestSlot(uintptr_t Ptr) const;
+
+  // Returns whether the provided pointer is a guard page. The pointer must be
+  // within memory owned by this pool, else the result is undefined.
+  bool isGuardPage(uintptr_t Ptr) const;
+
+  // Reserve a slot for a new guarded allocation. Returns kInvalidSlotID if no
+  // slot is available to be reserved.
+  size_t reserveSlot();
+
+  // Unreserve the guarded slot.
+  void freeSlot(size_t SlotIndex);
+
+  // Returns the offset (in bytes) between the start of a guarded slot and
+  // where the start of the allocation should take place. Determined using the
+  // size of the allocation and the options provided at init-time.
+  uintptr_t allocationSlotOffset(size_t AllocationSize) const;
+
+  // Returns the diagnosis for an unknown error. If the diagnosis is not
+  // Error::INVALID_FREE or Error::UNKNOWN, the metadata for the slot
+  // responsible for the error is placed in *Meta.
+  Error diagnoseUnknownError(uintptr_t AccessPtr, AllocationMetadata **Meta);
+
+  void reportErrorInternal(uintptr_t AccessPtr, Error Error);
+
+  // Cached page size for this system, in bytes.
+  size_t PageSize = 0;
+
+  // A mutex to protect the guarded slot and metadata pool for this class.
+  Mutex PoolMutex;
+  // The number of guarded slots that this pool holds.
+  size_t MaxSimultaneousAllocations = 0;
+  // Records the number of allocations that we've sampled. We store this
+  // amount so that we don't randomly choose to recycle a slot that previously
+  // had an allocation before all the slots have been utilised.
+  size_t NumSampledAllocations = 0;
+  // Pointer to the pool of guarded slots. Note that this points to the start
+  // of the pool (which is a guard page), not a pointer to the first guarded
+  // page.
+  uintptr_t GuardedPagePool = UINTPTR_MAX;
+  uintptr_t GuardedPagePoolEnd = 0;
+  // Pointer to the allocation metadata (allocation/deallocation stack
+  // traces), if any.
+  AllocationMetadata *Metadata = nullptr;
+
+  // Pointer to an array of free slot indexes.
+  size_t *FreeSlots = nullptr;
+  // The current length of the list of free slots.
+  size_t FreeSlotsLength = 0;
+
+  // See options.{h,inc} for more information.
+  bool PerfectlyRightAlign = false;
+
+  // Printf function supplied by the implementing allocator. We can't (in
+  // general) use printf() from the C standard library, as it may call
+  // malloc() and cause infinite recursion.
+  options::Printf_t Printf = nullptr;
+
+  // The adjusted sample rate for allocation sampling. The default *must* be
+  // nonzero, as dynamic initialisation may call malloc() (e.g. from
+  // libstdc++) before GPA::init() is called. This would cause an error in
+  // shouldSample(), where we would calculate modulo zero. The value is set to
+  // UINT32_MAX so that, when GWP-ASan is disabled, we never waste cycles
+  // recalculating the sample rate.
+  uint32_t AdjustedSampleRate = UINT32_MAX;
+  // Thread-local decrementing counter that indicates that a given allocation
+  // should be sampled when it reaches zero.
+  static TLS_INITIAL_EXEC uint64_t NextSampleCounter;
+};
+} // namespace gwp_asan
+
+#endif // GWP_ASAN_GUARDED_POOL_ALLOCATOR_H_
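[Note: illustrative sketch, not part of the patch. The pool that this header
describes interleaves guard pages (G) and slots (S):

  [ G | S0 | G | S1 | G | S2 | ... | S(n-1) | G ]

so every slot is bracketed by inaccessible pages, and any linear overflow or
underflow out of a slot faults immediately.]

#include <cstddef>
#include <cstdint>

// Mirrors slotToAddr() from the implementation file: slot N is preceded by
// N + 1 guard pages and N slots. With one-page slots this simplifies to
// PoolStart + PageSize * (2 * N + 1).
uintptr_t slotAddr(uintptr_t PoolStart, size_t PageSize, size_t SlotSize,
                   size_t N) {
  return PoolStart + (PageSize * (1 + N)) + (SlotSize * N);
}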
Index: compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.cpp
+++ compiler-rt/trunk/lib/gwp_asan/guarded_pool_allocator.cpp
@@ -0,0 +1,433 @@
+//===-- guarded_pool_allocator.cpp ------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/guarded_pool_allocator.h"
+
+#include "gwp_asan/options.h"
+
+#include <assert.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+using AllocationMetadata = gwp_asan::GuardedPoolAllocator::AllocationMetadata;
+using Error = gwp_asan::GuardedPoolAllocator::Error;
+
+namespace gwp_asan {
+namespace {
+// Forward declare the pointer to the singleton version of this class.
+// Instantiated during initialisation, this allows the signal handler to find
+// this class in order to deduce the root cause of failures. Must not be
+// referenced by users outside this translation unit, in order to avoid
+// init-order-fiasco.
+GuardedPoolAllocator *SingletonPtr = nullptr;
+} // anonymous namespace
+
+// Gets the singleton implementation of this class. Thread-compatible until
+// init() is called, thread-safe afterwards.
+GuardedPoolAllocator *getSingleton() { return SingletonPtr; }
+
+void GuardedPoolAllocator::AllocationMetadata::RecordAllocation(
+    uintptr_t AllocAddr, size_t AllocSize) {
+  Addr = AllocAddr;
+  Size = AllocSize;
+  IsDeallocated = false;
+
+  // TODO(hctim): Implement stack trace collection.
+  // TODO(hctim): Ask the caller to provide the thread ID, so we don't waste
+  // other threads' time getting the thread ID under lock.
+  AllocationTrace.ThreadID = getThreadID();
+  DeallocationTrace.ThreadID = kInvalidThreadID;
+  AllocationTrace.Trace[0] = 0;
+  DeallocationTrace.Trace[0] = 0;
+}
+
+void GuardedPoolAllocator::AllocationMetadata::RecordDeallocation() {
+  IsDeallocated = true;
+  // TODO(hctim): Implement stack trace collection.
+  DeallocationTrace.ThreadID = getThreadID();
+}
+
+void GuardedPoolAllocator::init(const options::Options &Opts) {
+  // Note: We return early from init() here if GWP-ASan is not available. This
+  // stops heap allocation of class members, as well as the mmap() of the
+  // guarded slots.
+  if (!Opts.Enabled || Opts.SampleRate == 0 ||
+      Opts.MaxSimultaneousAllocations == 0)
+    return;
+
+  // TODO(hctim): Add a death unit test for this.
+  if (SingletonPtr) {
+    (*SingletonPtr->Printf)(
+        "GWP-ASan Error: init() has already been called.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  if (Opts.SampleRate < 0) {
+    Opts.Printf("GWP-ASan Error: SampleRate is < 0.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  if (Opts.SampleRate > INT32_MAX) {
+    Opts.Printf("GWP-ASan Error: SampleRate is > 2^31.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  if (Opts.MaxSimultaneousAllocations < 0) {
+    Opts.Printf("GWP-ASan Error: MaxSimultaneousAllocations is < 0.\n");
+    exit(EXIT_FAILURE);
+  }
+
+  SingletonPtr = this;
+
+  MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations;
+
+  PageSize = getPlatformPageSize();
+
+  PerfectlyRightAlign = Opts.PerfectlyRightAlign;
+  Printf = Opts.Printf;
+
+  size_t PoolBytesRequired =
+      PageSize * (1 + MaxSimultaneousAllocations) +
+      MaxSimultaneousAllocations * maximumAllocationSize();
+  void *GuardedPoolMemory = mapMemory(PoolBytesRequired);
+
+  size_t BytesRequired = MaxSimultaneousAllocations * sizeof(*Metadata);
+  Metadata = reinterpret_cast<AllocationMetadata *>(mapMemory(BytesRequired));
+  markReadWrite(Metadata, BytesRequired);
+
+  // Allocate memory and set up the free pages queue.
+  BytesRequired = MaxSimultaneousAllocations * sizeof(*FreeSlots);
+  FreeSlots = reinterpret_cast<size_t *>(mapMemory(BytesRequired));
+  markReadWrite(FreeSlots, BytesRequired);
+
+  // Multiply the sample rate by 2 to give a good, fast approximation of a
+  // (1 / SampleRate) chance of sampling.
+  if (Opts.SampleRate != 1)
+    AdjustedSampleRate = static_cast<uint32_t>(Opts.SampleRate) * 2;
+  else
+    AdjustedSampleRate = 1;
+
+  GuardedPagePool = reinterpret_cast<uintptr_t>(GuardedPoolMemory);
+  GuardedPagePoolEnd =
+      reinterpret_cast<uintptr_t>(GuardedPoolMemory) + PoolBytesRequired;
+
+  // Ensure that signal handlers are installed as late as possible, as the
+  // class is not thread-safe until init() is finished, and thus a SIGSEGV may
+  // cause a race on members if received during init().
+  if (Opts.InstallSignalHandlers)
+    installSignalHandlers();
+}
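[Note: illustrative sketch, not part of the patch. Making init()'s pool sizing
concrete, with an assumed 4 KiB page size and 16 simultaneous allocations:]

#include <cstddef>
#include <cstdio>

int main() {
  const size_t PageSize = 4096;                 // Assumed for illustration.
  const size_t MaxSimultaneousAllocations = 16; // Example value.
  // Same formula as init(): a leading guard page, then one guard page plus
  // one slot per simultaneous allocation (slot size == page size here).
  const size_t PoolBytesRequired =
      PageSize * (1 + MaxSimultaneousAllocations) +
      MaxSimultaneousAllocations * PageSize;
  // Prints "33 pages, 135168 bytes" -- all initially PROT_NONE; only live
  // slots are ever made accessible.
  std::printf("%zu pages, %zu bytes\n", PoolBytesRequired / PageSize,
              PoolBytesRequired);
  return 0;
}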
+
+void *GuardedPoolAllocator::allocate(size_t Size) {
+  if (Size == 0 || Size > maximumAllocationSize())
+    return nullptr;
+
+  size_t Index;
+  {
+    ScopedLock L(PoolMutex);
+    Index = reserveSlot();
+  }
+
+  if (Index == kInvalidSlotID)
+    return nullptr;
+
+  uintptr_t Ptr = slotToAddr(Index);
+  Ptr += allocationSlotOffset(Size);
+  AllocationMetadata *Meta = addrToMetadata(Ptr);
+
+  // If a slot is multiple pages in size, and the allocation takes up a single
+  // page, we can improve overflow detection by leaving the unused pages
+  // unmapped.
+  markReadWrite(reinterpret_cast<void *>(getPageAddr(Ptr)), Size);
+
+  Meta->RecordAllocation(Ptr, Size);
+
+  return reinterpret_cast<void *>(Ptr);
+}
+
+void GuardedPoolAllocator::deallocate(void *Ptr) {
+  assert(pointerIsMine(Ptr) && "Pointer is not mine!");
+  uintptr_t UPtr = reinterpret_cast<uintptr_t>(Ptr);
+  uintptr_t SlotStart = slotToAddr(addrToSlot(UPtr));
+  AllocationMetadata *Meta = addrToMetadata(UPtr);
+  if (Meta->Addr != UPtr) {
+    reportError(UPtr, Error::INVALID_FREE);
+    exit(EXIT_FAILURE);
+  }
+
+  // Intentionally scope the mutex here, so that other threads can access the
+  // pool during the expensive markInaccessible() call.
+  {
+    ScopedLock L(PoolMutex);
+    if (Meta->IsDeallocated) {
+      reportError(UPtr, Error::DOUBLE_FREE);
+      exit(EXIT_FAILURE);
+    }
+
+    // Ensure that the deallocation is recorded before marking the page as
+    // inaccessible. Otherwise, a racy use-after-free will have inconsistent
+    // metadata.
+    Meta->RecordDeallocation();
+  }
+
+  markInaccessible(reinterpret_cast<void *>(SlotStart),
+                   maximumAllocationSize());
+
+  // And finally, lock again to release the slot back into the pool.
+  ScopedLock L(PoolMutex);
+  freeSlot(addrToSlot(UPtr));
+}
+
+size_t GuardedPoolAllocator::getSize(const void *Ptr) {
+  assert(pointerIsMine(Ptr));
+  ScopedLock L(PoolMutex);
+  AllocationMetadata *Meta = addrToMetadata(reinterpret_cast<uintptr_t>(Ptr));
+  assert(Meta->Addr == reinterpret_cast<uintptr_t>(Ptr));
+  return Meta->Size;
+}
+
+size_t GuardedPoolAllocator::maximumAllocationSize() const { return PageSize; }
+
+AllocationMetadata *GuardedPoolAllocator::addrToMetadata(uintptr_t Ptr) const {
+  return &Metadata[addrToSlot(Ptr)];
+}
+
+size_t GuardedPoolAllocator::addrToSlot(uintptr_t Ptr) const {
+  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+  size_t ByteOffsetFromPoolStart = Ptr - GuardedPagePool;
+  return ByteOffsetFromPoolStart / (maximumAllocationSize() + PageSize);
+}
+
+uintptr_t GuardedPoolAllocator::slotToAddr(size_t N) const {
+  return GuardedPagePool + (PageSize * (1 + N)) + (maximumAllocationSize() * N);
+}
+
+uintptr_t GuardedPoolAllocator::getPageAddr(uintptr_t Ptr) const {
+  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+  return Ptr & ~(static_cast<uintptr_t>(PageSize) - 1);
+}
+
+bool GuardedPoolAllocator::isGuardPage(uintptr_t Ptr) const {
+  assert(pointerIsMine(reinterpret_cast<void *>(Ptr)));
+  size_t PageOffsetFromPoolStart = (Ptr - GuardedPagePool) / PageSize;
+  size_t PagesPerSlot = maximumAllocationSize() / PageSize;
+  return (PageOffsetFromPoolStart % (PagesPerSlot + 1)) == 0;
+}
+
+size_t GuardedPoolAllocator::reserveSlot() {
+  // Avoid potential reuse of a slot before we have made at least a single
+  // allocation in each slot. Helps with our use-after-free detection.
+  if (NumSampledAllocations < MaxSimultaneousAllocations)
+    return NumSampledAllocations++;
+
+  if (FreeSlotsLength == 0)
+    return kInvalidSlotID;
+
+  size_t ReservedIndex = getRandomUnsigned32() % FreeSlotsLength;
+  size_t SlotIndex = FreeSlots[ReservedIndex];
+  FreeSlots[ReservedIndex] = FreeSlots[--FreeSlotsLength];
+  return SlotIndex;
+}
+
+void GuardedPoolAllocator::freeSlot(size_t SlotIndex) {
+  assert(FreeSlotsLength < MaxSimultaneousAllocations);
+  FreeSlots[FreeSlotsLength++] = SlotIndex;
+}
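[Note: illustrative sketch, not part of the patch. reserveSlot()'s random path
above uses the classic swap-with-last trick, isolated here for clarity.]

#include <cstddef>

// O(1) removal of a uniformly random element from an unordered array:
// overwrite the chosen entry with the last entry, then shrink by one.
size_t takeAt(size_t *Slots, size_t &Length, size_t RandomIndex) {
  size_t Chosen = Slots[RandomIndex];   // Requires RandomIndex < Length.
  Slots[RandomIndex] = Slots[--Length]; // The tail fills the hole.
  return Chosen;
}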
+
+uintptr_t GuardedPoolAllocator::allocationSlotOffset(size_t Size) const {
+  assert(Size > 0);
+
+  bool ShouldRightAlign = getRandomUnsigned32() % 2 == 0;
+  if (!ShouldRightAlign)
+    return 0;
+
+  uintptr_t Offset = maximumAllocationSize();
+  if (!PerfectlyRightAlign) {
+    if (Size == 3)
+      Size = 4;
+    else if (Size > 4 && Size <= 8)
+      Size = 8;
+    else if (Size > 8 && (Size % 16) != 0)
+      Size += 16 - (Size % 16);
+  }
+  Offset -= Size;
+  return Offset;
+}
+
+void GuardedPoolAllocator::reportError(uintptr_t AccessPtr, Error Error) {
+  if (SingletonPtr)
+    SingletonPtr->reportErrorInternal(AccessPtr, Error);
+}
+
+size_t GuardedPoolAllocator::getNearestSlot(uintptr_t Ptr) const {
+  if (Ptr <= GuardedPagePool + PageSize)
+    return 0;
+  if (Ptr > GuardedPagePoolEnd - PageSize)
+    return MaxSimultaneousAllocations - 1;
+
+  if (!isGuardPage(Ptr))
+    return addrToSlot(Ptr);
+
+  if (Ptr % PageSize <= PageSize / 2)
+    return addrToSlot(Ptr - PageSize); // Round down.
+  return addrToSlot(Ptr + PageSize);   // Round up.
+}
+
+Error GuardedPoolAllocator::diagnoseUnknownError(uintptr_t AccessPtr,
+                                                 AllocationMetadata **Meta) {
+  // Let's try and figure out what the source of this error is.
+  if (isGuardPage(AccessPtr)) {
+    size_t Slot = getNearestSlot(AccessPtr);
+    AllocationMetadata *SlotMeta = addrToMetadata(slotToAddr(Slot));
+
+    // Ensure that this slot was allocated once upon a time.
+    if (!SlotMeta->Addr)
+      return Error::UNKNOWN;
+    *Meta = SlotMeta;
+
+    if (SlotMeta->Addr < AccessPtr)
+      return Error::BUFFER_OVERFLOW;
+    return Error::BUFFER_UNDERFLOW;
+  }
+
+  // Access wasn't a guard page, check for use-after-free.
+  AllocationMetadata *SlotMeta = addrToMetadata(AccessPtr);
+  if (SlotMeta->IsDeallocated) {
+    *Meta = SlotMeta;
+    return Error::USE_AFTER_FREE;
+  }
+
+  // If we have reached here, the error is still unknown. There is no metadata
+  // available.
+  return Error::UNKNOWN;
+}
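[Note: illustrative sketch, not part of the patch. The padding in
allocationSlotOffset() trades a little overflow precision for usable
alignment: unless PerfectlyRightAlign is set, the allocation's end is padded
to a 1/2/4/8/16-byte boundary before being butted against the guard page. A
5-byte request is padded to 8, so it starts 8 bytes before the guard page and
an overflow of up to 3 bytes lands in the slack instead of faulting.]

#include <cstddef>

// The !PerfectlyRightAlign padding from allocationSlotOffset(), isolated.
size_t roundedAllocationSize(size_t Size) {
  if (Size == 3)
    return 4;
  if (Size > 4 && Size <= 8)
    return 8;
  if (Size > 8 && (Size % 16) != 0)
    return Size + 16 - (Size % 16);
  return Size; // 1, 2, 4, 8, and multiples of 16 are already aligned.
}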
+
+// Prints the provided error and metadata information. Returns true if there
+// is additional context that can be provided, false otherwise (i.e. returns
+// false if Error == {UNKNOWN, INVALID_FREE without metadata}).
+bool printErrorType(Error Error, uintptr_t AccessPtr, AllocationMetadata *Meta,
+                    options::Printf_t Printf) {
+  switch (Error) {
+  case Error::UNKNOWN:
+    Printf("GWP-ASan couldn't automatically determine the source of the "
+           "memory error when accessing 0x%zx. It was likely caused by a wild "
+           "memory access into the GWP-ASan pool.\n",
+           AccessPtr);
+    return false;
+  case Error::USE_AFTER_FREE:
+    Printf("Use after free occurred when accessing memory at: 0x%zx\n",
+           AccessPtr);
+    break;
+  case Error::DOUBLE_FREE:
+    Printf("Double free occurred when trying to free memory at: 0x%zx\n",
+           AccessPtr);
+    break;
+  case Error::INVALID_FREE:
+    Printf(
+        "Invalid (wild) free occurred when trying to free memory at: 0x%zx\n",
+        AccessPtr);
+    // It's possible for an invalid free to fall onto a slot that has never
+    // been allocated. If this is the case, there is no valid metadata.
+    if (Meta == nullptr)
+      return false;
+    break;
+  case Error::BUFFER_OVERFLOW:
+    Printf("Buffer overflow occurred when accessing memory at: 0x%zx\n",
+           AccessPtr);
+    break;
+  case Error::BUFFER_UNDERFLOW:
+    Printf("Buffer underflow occurred when accessing memory at: 0x%zx\n",
+           AccessPtr);
+    break;
+  }
+
+  Printf("0x%zx is ", AccessPtr);
+  if (AccessPtr < Meta->Addr)
+    Printf("located %zu bytes to the left of a %zu-byte allocation located "
+           "at 0x%zx\n",
+           Meta->Addr - AccessPtr, Meta->Size, Meta->Addr);
+  else if (AccessPtr > Meta->Addr)
+    Printf("located %zu bytes to the right of a %zu-byte allocation located "
+           "at 0x%zx\n",
+           AccessPtr - Meta->Addr, Meta->Size, Meta->Addr);
+  else
+    Printf("a %zu-byte allocation\n", Meta->Size);
+  return true;
+}
+
+void printThreadInformation(Error Error, uintptr_t AccessPtr,
+                            AllocationMetadata *Meta,
+                            options::Printf_t Printf) {
+  Printf("0x%zx was allocated by thread ", AccessPtr);
+  if (Meta->AllocationTrace.ThreadID == UINT64_MAX)
+    Printf("UNKNOWN.\n");
+  else
+    Printf("%zu.\n", Meta->AllocationTrace.ThreadID);
+
+  if (Error == Error::USE_AFTER_FREE || Error == Error::DOUBLE_FREE) {
+    // Note: this must report the *deallocation* thread, not the allocation
+    // thread.
+    Printf("0x%zx was freed by thread ", AccessPtr);
+    if (Meta->DeallocationTrace.ThreadID == UINT64_MAX)
+      Printf("UNKNOWN.\n");
+    else
+      Printf("%zu.\n", Meta->DeallocationTrace.ThreadID);
+  }
+}
+
+struct ScopedEndOfReportDecorator {
+  ScopedEndOfReportDecorator(options::Printf_t Printf) : Printf(Printf) {}
+  ~ScopedEndOfReportDecorator() { Printf("*** End GWP-ASan report ***\n"); }
+  options::Printf_t Printf;
+};
+
+void GuardedPoolAllocator::reportErrorInternal(uintptr_t AccessPtr,
+                                               Error Error) {
+  if (!pointerIsMine(reinterpret_cast<void *>(AccessPtr)))
+    return;
+
+  // Attempt to prevent races to re-use the same slot that triggered this
+  // error. This does not guarantee that there are no races, because another
+  // thread can take the locks during the time that the signal handler is
+  // being called.
+  PoolMutex.tryLock();
+
+  Printf("*** GWP-ASan detected a memory error ***\n");
+  ScopedEndOfReportDecorator Decorator(Printf);
+
+  AllocationMetadata *Meta = nullptr;
+
+  if (Error == Error::UNKNOWN) {
+    Error = diagnoseUnknownError(AccessPtr, &Meta);
+  } else {
+    size_t Slot = getNearestSlot(AccessPtr);
+    Meta = addrToMetadata(slotToAddr(Slot));
+    // Ensure that this slot has been previously allocated.
+    if (!Meta->Addr)
+      Meta = nullptr;
+  }
+
+  // Print the error information, and if there is no valid metadata, stop here.
+  if (!printErrorType(Error, AccessPtr, Meta, Printf))
+    return;
+
+  // Ensure that we have a valid metadata pointer from this point forward.
+  if (Meta == nullptr) {
+    Printf("GWP-ASan internal unreachable error: expected non-null metadata "
+           "at this point.\n");
+    return;
+  }
+
+  printThreadInformation(Error, AccessPtr, Meta, Printf);
+  // TODO(hctim): Implement stack unwinding here. Ask the caller to provide us
+  // with the base pointer, and we unwind the stack to give a stack trace for
+  // the access.
+  // TODO(hctim): Implement dumping of allocation/deallocation traces here.
+}
+
+TLS_INITIAL_EXEC uint64_t GuardedPoolAllocator::NextSampleCounter = 0;
+} // namespace gwp_asan
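[Note: sample output, not part of the patch. Piecing together the Printf calls
above, a use-after-free report would look roughly as follows; the addresses
and thread IDs are invented for illustration.]

*** GWP-ASan detected a memory error ***
Use after free occurred when accessing memory at: 0x7f3a6bcfe010
0x7f3a6bcfe010 is a 16-byte allocation
0x7f3a6bcfe010 was allocated by thread 4128.
0x7f3a6bcfe010 was freed by thread 4129.
*** End GWP-ASan report ***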
Index: compiler-rt/trunk/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
+++ compiler-rt/trunk/lib/gwp_asan/platform_specific/guarded_pool_allocator_posix.cpp
@@ -0,0 +1,96 @@
+//===-- guarded_pool_allocator_posix.cpp ------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/guarded_pool_allocator.h"
+
+#include <errno.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <sys/syscall.h>
+#include <sys/types.h>
+#include <unistd.h>
+
+namespace gwp_asan {
+
+void *GuardedPoolAllocator::mapMemory(size_t Size) const {
+  void *Ptr =
+      mmap(nullptr, Size, PROT_NONE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+
+  if (Ptr == MAP_FAILED) {
+    Printf("Failed to map guarded pool allocator memory, errno: %d\n", errno);
+    Printf("  mmap(nullptr, %zu, ...) failed.\n", Size);
+    exit(EXIT_FAILURE);
+  }
+  return Ptr;
+}
+
+void GuardedPoolAllocator::markReadWrite(void *Ptr, size_t Size) const {
+  if (mprotect(Ptr, Size, PROT_READ | PROT_WRITE) != 0) {
+    Printf("Failed to set guarded pool allocator memory as RW, errno: %d\n",
+           errno);
+    Printf("  mprotect(%p, %zu, RW) failed.\n", Ptr, Size);
+    exit(EXIT_FAILURE);
+  }
+}
+
+void GuardedPoolAllocator::markInaccessible(void *Ptr, size_t Size) const {
+  // mmap() a PROT_NONE page over the address to release it to the system; if
+  // we used mprotect() here, the system would count pages in the quarantine
+  // against the RSS.
+  if (mmap(Ptr, Size, PROT_NONE, MAP_FIXED | MAP_ANONYMOUS | MAP_PRIVATE, -1,
+           0) == MAP_FAILED) {
+    Printf("Failed to set guarded pool allocator memory as inaccessible, "
+           "errno: %d\n",
+           errno);
+    Printf("  mmap(%p, %zu, NONE, ...) failed.\n", Ptr, Size);
+    exit(EXIT_FAILURE);
+  }
+}
+
+size_t GuardedPoolAllocator::getPlatformPageSize() {
+  return sysconf(_SC_PAGESIZE);
+}
+
+struct sigaction PreviousHandler;
+
+static void sigSegvHandler(int sig, siginfo_t *info, void *ucontext) {
+  gwp_asan::GuardedPoolAllocator::reportError(
+      reinterpret_cast<uintptr_t>(info->si_addr));
+
+  // Process any previous handlers.
+  if (PreviousHandler.sa_flags & SA_SIGINFO) {
+    PreviousHandler.sa_sigaction(sig, info, ucontext);
+  } else if (PreviousHandler.sa_handler == SIG_IGN ||
+             PreviousHandler.sa_handler == SIG_DFL) {
+    // If the previous handler was the default handler, or was ignoring this
+    // signal, install the default handler and re-raise the signal in order to
+    // get a core dump and terminate this process.
+    signal(SIGSEGV, SIG_DFL);
+    raise(SIGSEGV);
+  } else {
+    PreviousHandler.sa_handler(sig);
+  }
+}
+
+void GuardedPoolAllocator::installSignalHandlers() {
+  struct sigaction Action;
+  Action.sa_sigaction = sigSegvHandler;
+  Action.sa_flags = SA_SIGINFO;
+  sigaction(SIGSEGV, &Action, &PreviousHandler);
+}
+
+uint64_t GuardedPoolAllocator::getThreadID() {
+#ifdef SYS_gettid
+  return syscall(SYS_gettid);
+#else
+  return kInvalidThreadID;
+#endif
+}
+
+} // namespace gwp_asan
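[Note: illustrative sketch, not part of the patch. It distils why the code
above uses sigaction() rather than signal(): only an SA_SIGINFO handler
receives the faulting address. The fprintf() is not async-signal-safe and is
for demonstration only.]

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>

static void Handler(int, siginfo_t *Info, void *) {
  // si_addr is the address whose access faulted -- exactly what GWP-ASan
  // needs in order to attribute the fault to a slot. Handlers registered
  // with signal() never see it.
  fprintf(stderr, "SIGSEGV at %p\n", Info->si_addr);
  _Exit(EXIT_FAILURE);
}

int main() {
  struct sigaction Action = {};
  Action.sa_sigaction = Handler;
  Action.sa_flags = SA_SIGINFO;
  sigaction(SIGSEGV, &Action, nullptr);
  *reinterpret_cast<volatile int *>(16) = 0; // Deliberately fault.
}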
Index: compiler-rt/trunk/lib/gwp_asan/tests/CMakeLists.txt
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/tests/CMakeLists.txt
+++ compiler-rt/trunk/lib/gwp_asan/tests/CMakeLists.txt
@@ -9,7 +9,8 @@
 file(GLOB GWP_ASAN_HEADERS ../*.h)
 file(GLOB GWP_ASAN_UNITTESTS *.cpp)
 set(GWP_ASAN_UNIT_TEST_HEADERS
-  ${GWP_ASAN_HEADERS})
+  ${GWP_ASAN_HEADERS}
+  harness.h)
 
 add_custom_target(GwpAsanUnitTests)
 set_target_properties(GwpAsanUnitTests PROPERTIES FOLDER "Compiler-RT Tests")
@@ -26,8 +27,11 @@
 
   set(GWP_ASAN_TEST_RUNTIME RTGwpAsanTest.${arch})
 
+  # RTSanitizerCommonNoTermination and RTSanitizerCommonNoLibc are required
+  # for __sanitizer::Printf.
   set(GWP_ASAN_TEST_RUNTIME_OBJECTS
-    $<TARGET_OBJECTS:RTGwpAsan.${arch}>)
+    $<TARGET_OBJECTS:RTGwpAsan.${arch}>
+    $<TARGET_OBJECTS:RTSanitizerCommonNoTermination.${arch}>
+    $<TARGET_OBJECTS:RTSanitizerCommonNoLibc.${arch}>)
 
   add_library(${GWP_ASAN_TEST_RUNTIME} STATIC
     ${GWP_ASAN_TEST_RUNTIME_OBJECTS})
Index: compiler-rt/trunk/lib/gwp_asan/tests/alignment.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/tests/alignment.cpp
+++ compiler-rt/trunk/lib/gwp_asan/tests/alignment.cpp
@@ -0,0 +1,27 @@
+//===-- alignment.cpp -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/tests/harness.h"
+
+TEST_F(DefaultGuardedPoolAllocator, BasicAllocation) {
+  std::vector<std::pair<size_t, size_t>> AllocSizeToAlignment = {
+      {1, 1},   {2, 2},   {3, 4},       {4, 4},       {5, 8},   {7, 8},
+      {8, 8},   {9, 16},  {15, 16},     {16, 16},     {17, 16}, {31, 16},
+      {32, 16}, {33, 16}, {4095, 4096}, {4096, 4096},
+  };
+
+  for (const auto &KV : AllocSizeToAlignment) {
+    void *Ptr = GPA.allocate(KV.first);
+    EXPECT_NE(nullptr, Ptr);
+
+    // Check that the alignment of the pointer is as expected.
+    EXPECT_EQ(0u, reinterpret_cast<uintptr_t>(Ptr) % KV.second);
+
+    GPA.deallocate(Ptr);
+  }
+}
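[Note: illustrative sketch, not part of the patch. The expected alignments in
the table above follow from the right-alignment padding: a right-aligned
allocation of padded size S starts at offset PageSize - S within its page, so
its guaranteed alignment is the largest power of two dividing that offset
(left-aligned placements are page-aligned, which is stronger). The table's
values are conservative lower bounds on this. roundedAllocationSize() is the
helper sketched earlier.]

#include <cstddef>

// For example, a 5-byte request pads to S = 8; 4096 - 8 = 4088 = 8 * 511, so
// the start is 8-byte (but not 16-byte) aligned.
size_t rightAlignedAlignment(size_t PaddedSize, size_t PageSize) {
  size_t Start = PageSize - PaddedSize;
  if (Start == 0)
    return PageSize; // A full-page allocation starts on the page boundary.
  return Start & (~Start + 1); // Isolate the lowest set bit.
}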
Index: compiler-rt/trunk/lib/gwp_asan/tests/basic.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/tests/basic.cpp
+++ compiler-rt/trunk/lib/gwp_asan/tests/basic.cpp
@@ -0,0 +1,60 @@
+//===-- basic.cpp -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/tests/harness.h"
+
+TEST_F(CustomGuardedPoolAllocator, BasicAllocation) {
+  InitNumSlots(1);
+  void *Ptr = GPA.allocate(1);
+  EXPECT_NE(nullptr, Ptr);
+  EXPECT_TRUE(GPA.pointerIsMine(Ptr));
+  EXPECT_EQ(1u, GPA.getSize(Ptr));
+  GPA.deallocate(Ptr);
+}
+
+TEST_F(DefaultGuardedPoolAllocator, NullptrIsNotMine) {
+  EXPECT_FALSE(GPA.pointerIsMine(nullptr));
+}
+
+TEST_F(CustomGuardedPoolAllocator, SizedAllocations) {
+  InitNumSlots(1);
+
+  std::size_t MaxAllocSize = GPA.maximumAllocationSize();
+  EXPECT_TRUE(MaxAllocSize > 0);
+
+  for (unsigned AllocSize = 1; AllocSize <= MaxAllocSize; AllocSize <<= 1) {
+    void *Ptr = GPA.allocate(AllocSize);
+    EXPECT_NE(nullptr, Ptr);
+    EXPECT_TRUE(GPA.pointerIsMine(Ptr));
+    EXPECT_EQ(AllocSize, GPA.getSize(Ptr));
+    GPA.deallocate(Ptr);
+  }
+}
+
+TEST_F(DefaultGuardedPoolAllocator, TooLargeAllocation) {
+  EXPECT_EQ(nullptr, GPA.allocate(GPA.maximumAllocationSize() + 1));
+}
+
+TEST_F(CustomGuardedPoolAllocator, AllocAllSlots) {
+  constexpr unsigned kNumSlots = 128;
+  InitNumSlots(kNumSlots);
+  void *Ptrs[kNumSlots];
+  for (unsigned i = 0; i < kNumSlots; ++i) {
+    Ptrs[i] = GPA.allocate(1);
+    EXPECT_NE(nullptr, Ptrs[i]);
+    EXPECT_TRUE(GPA.pointerIsMine(Ptrs[i]));
+  }
+
+  // This allocation should fail, as all the slots are in use.
+  void *Ptr = GPA.allocate(1);
+  EXPECT_EQ(nullptr, Ptr);
+  EXPECT_FALSE(GPA.pointerIsMine(Ptr));
+
+  for (unsigned i = 0; i < kNumSlots; ++i)
+    GPA.deallocate(Ptrs[i]);
+}
+#include "sanitizer_common/sanitizer_common.h" + +#include "gwp_asan/guarded_pool_allocator.h" +#include "gwp_asan/options.h" + +class DefaultGuardedPoolAllocator : public ::testing::Test { +public: + DefaultGuardedPoolAllocator() { + gwp_asan::options::Options Opts; + Opts.setDefaults(); + MaxSimultaneousAllocations = Opts.MaxSimultaneousAllocations; + + Opts.Printf = __sanitizer::Printf; + GPA.init(Opts); + } + +protected: + gwp_asan::GuardedPoolAllocator GPA; + decltype(gwp_asan::options::Options::MaxSimultaneousAllocations) + MaxSimultaneousAllocations; +}; + +class CustomGuardedPoolAllocator : public ::testing::Test { +public: + void + InitNumSlots(decltype(gwp_asan::options::Options::MaxSimultaneousAllocations) + MaxSimultaneousAllocationsArg) { + gwp_asan::options::Options Opts; + Opts.setDefaults(); + + Opts.MaxSimultaneousAllocations = MaxSimultaneousAllocationsArg; + MaxSimultaneousAllocations = MaxSimultaneousAllocationsArg; + + Opts.Printf = __sanitizer::Printf; + GPA.init(Opts); + } + +protected: + gwp_asan::GuardedPoolAllocator GPA; + decltype(gwp_asan::options::Options::MaxSimultaneousAllocations) + MaxSimultaneousAllocations; +}; + +#endif // GWP_ASAN_TESTS_HARNESS_H_ Index: compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp =================================================================== --- compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp +++ compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp @@ -0,0 +1,72 @@ +//===-- slot_reuse.cc -------------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// + +#include "gwp_asan/tests/harness.h" + +void singleByteGoodAllocDealloc(gwp_asan::GuardedPoolAllocator *GPA) { + void *Ptr = GPA->allocate(1); + EXPECT_NE(nullptr, Ptr); + EXPECT_TRUE(GPA->pointerIsMine(Ptr)); + EXPECT_EQ(1u, GPA->getSize(Ptr)); + GPA->deallocate(Ptr); +} + +TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine1) { + InitNumSlots(1); + for (unsigned i = 0; i < 128; ++i) + singleByteGoodAllocDealloc(&GPA); +} + +TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine2) { + InitNumSlots(2); + for (unsigned i = 0; i < 128; ++i) + singleByteGoodAllocDealloc(&GPA); +} + +TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine127) { + InitNumSlots(127); + for (unsigned i = 0; i < 128; ++i) + singleByteGoodAllocDealloc(&GPA); +} + +// This test ensures that our slots are not reused ahead of time. We increase +// the use-after-free detection by not reusing slots until all of them have been +// allocated. This is done by always using the slots from left-to-right in the +// pool before we used each slot once, at which point random selection takes +// over. 
Index: compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp
+++ compiler-rt/trunk/lib/gwp_asan/tests/slot_reuse.cpp
@@ -0,0 +1,72 @@
+//===-- slot_reuse.cpp ------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/tests/harness.h"
+
+#include <set>
+
+void singleByteGoodAllocDealloc(gwp_asan::GuardedPoolAllocator *GPA) {
+  void *Ptr = GPA->allocate(1);
+  EXPECT_NE(nullptr, Ptr);
+  EXPECT_TRUE(GPA->pointerIsMine(Ptr));
+  EXPECT_EQ(1u, GPA->getSize(Ptr));
+  GPA->deallocate(Ptr);
+}
+
+TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine1) {
+  InitNumSlots(1);
+  for (unsigned i = 0; i < 128; ++i)
+    singleByteGoodAllocDealloc(&GPA);
+}
+
+TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine2) {
+  InitNumSlots(2);
+  for (unsigned i = 0; i < 128; ++i)
+    singleByteGoodAllocDealloc(&GPA);
+}
+
+TEST_F(CustomGuardedPoolAllocator, EnsureReuseOfQuarantine127) {
+  InitNumSlots(127);
+  for (unsigned i = 0; i < 128; ++i)
+    singleByteGoodAllocDealloc(&GPA);
+}
+
+// This test ensures that our slots are not reused ahead of time. We improve
+// use-after-free detection by not reusing slots until all of them have been
+// allocated at least once: slots are handed out from left to right in the
+// pool until each has been used, at which point random selection takes over.
+void runNoReuseBeforeNecessary(gwp_asan::GuardedPoolAllocator *GPA,
+                               unsigned PoolSize) {
+  std::set<void *> Ptrs;
+  for (unsigned i = 0; i < PoolSize; ++i) {
+    void *Ptr = GPA->allocate(1);
+
+    EXPECT_TRUE(GPA->pointerIsMine(Ptr));
+    EXPECT_EQ(0u, Ptrs.count(Ptr));
+
+    Ptrs.insert(Ptr);
+    GPA->deallocate(Ptr);
+  }
+}
+
+TEST_F(CustomGuardedPoolAllocator, NoReuseBeforeNecessary2) {
+  constexpr unsigned kPoolSize = 2;
+  InitNumSlots(kPoolSize);
+  runNoReuseBeforeNecessary(&GPA, kPoolSize);
+}
+
+TEST_F(CustomGuardedPoolAllocator, NoReuseBeforeNecessary128) {
+  constexpr unsigned kPoolSize = 128;
+  InitNumSlots(kPoolSize);
+  runNoReuseBeforeNecessary(&GPA, kPoolSize);
+}
+
+TEST_F(CustomGuardedPoolAllocator, NoReuseBeforeNecessary129) {
+  constexpr unsigned kPoolSize = 129;
+  InitNumSlots(kPoolSize);
+  runNoReuseBeforeNecessary(&GPA, kPoolSize);
+}
Index: compiler-rt/trunk/lib/gwp_asan/tests/thread_contention.cpp
===================================================================
--- compiler-rt/trunk/lib/gwp_asan/tests/thread_contention.cpp
+++ compiler-rt/trunk/lib/gwp_asan/tests/thread_contention.cpp
@@ -0,0 +1,69 @@
+//===-- thread_contention.cpp -----------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "gwp_asan/tests/harness.h"
+
+// Note: Compilation of <atomic> and <thread> is extremely expensive for
+// non-optimised builds of clang.
+#include <atomic>
+#include <chrono>
+#include <thread>
+#include <vector>
+
+void asyncTask(gwp_asan::GuardedPoolAllocator *GPA,
+               std::atomic<bool> *StartingGun, unsigned NumIterations) {
+  while (!*StartingGun) {
+    // Wait for the starting gun.
+  }
+
+  // Get ourselves a new allocation.
+  for (unsigned i = 0; i < NumIterations; ++i) {
+    volatile char *Ptr = reinterpret_cast<volatile char *>(
+        GPA->allocate(GPA->maximumAllocationSize()));
+    // Do any other threads have access to this page?
+    EXPECT_EQ(*Ptr, 0);
+
+    // Mark the page as owned by this thread. Wait to see if another thread
+    // also takes this page.
+    *Ptr = 'A';
+    std::this_thread::sleep_for(std::chrono::nanoseconds(10000));
+
+    // Check that we still own the page.
+    EXPECT_EQ(*Ptr, 'A');
+
+    // And now release it.
+    *Ptr = 0;
+    GPA->deallocate(const_cast<char *>(Ptr));
+  }
+}
+
+void runThreadContentionTest(unsigned NumThreads, unsigned NumIterations,
+                             gwp_asan::GuardedPoolAllocator *GPA) {
+  std::atomic<bool> StartingGun{false};
+  std::vector<std::thread> Threads;
+  if (std::thread::hardware_concurrency() < NumThreads)
+    NumThreads = std::thread::hardware_concurrency();
+
+  for (unsigned i = 0; i < NumThreads; ++i)
+    Threads.emplace_back(asyncTask, GPA, &StartingGun, NumIterations);
+
+  StartingGun = true;
+
+  for (auto &T : Threads)
+    T.join();
+}
+
+TEST_F(CustomGuardedPoolAllocator, ThreadContention) {
+  unsigned NumThreads = 4;
+  unsigned NumIterations = 10000;
+  InitNumSlots(NumThreads);
+  runThreadContentionTest(NumThreads, NumIterations, &GPA);
+}