Index: docs/HardenedAllocator.rst =================================================================== --- docs/HardenedAllocator.rst +++ docs/HardenedAllocator.rst @@ -0,0 +1,111 @@ +======================== +Scudo Hardened Allocator +======================== + +.. contents:: + :local: + :depth: 1 + +Introduction +============ +The Scudo Hardened Allocator is a user-mode allocator based on LLVM Sanitizer's +CombinedAllocator, which aims at providing additional mitigations against heap +based vulnerabilities, while retaining good performance. + +The name "Scudo" has been retained from the initial implementation (Escudo +meaning Shield in Spanish and Portuguese). + +Design +====== +Chunk Header +------------ +Every chunk of heap memory will be preceded by a chunk header. This has two +purposes, the first one being to store various information about the chunk, +the second one being to detect potential heap overflows. In order to achieve +this, the header will be checksummed, involving the pointer to the chunk itself +and a global secret. Any corruption of the header will be detected when said +header is accessed, and the process terminated. + +The following information is stored in the header: + +- the 16-bit checksum; +- the user requested size for that chunk, which is necessary for reallocation + purposes; +- the state of the chunk (available, allocated or quarantined); +- the allocation type (malloc, new, new[] or memalign), to detect potential + mismatches in the allocation APIs used; +- whether or not the chunk is offset (i.e. if the chunk beginning is different + than the backend allocation beginning, which is most often the case with some + aligned allocations); +- the associated offset; +- a 16-bit salt. + +On x64, which is currently the only architecture supported, the header fits +within 16-bytes, which works nicely with the minimum alignment requirements. 
+ +The checksum is computed as a CRC32 (if the associated CPU instructions are +available) of the chunk pointer itself, and the 16 bytes of header with the +checksum field zeroed out. The result is then xored with a global secret. + +The header is atomically loaded and stored to prevent races (this requires +platform support such as the cmpxchg16b instruction). This is important as two +consecutive chunks could belong to different threads. We also want to avoid +any type of double fetches of information located in the header, and use local +copies of the header for this purpose. + +Delayed Freelist +----------------- +A delayed freelist allows us to not return a chunk directly to the backend, but +to keep it aside for a while. Once a criterion is met, the delayed freelist is +emptied, and the quarantined chunks are returned to the backend. This helps +mitigate use-after-free vulnerabilities by reducing the determinism of the +allocation and deallocation patterns. + +This feature uses the Sanitizer's Quarantine as its base, and the amount of +memory that it can hold is configurable by the user (see the Options section +below). + +Randomness +---------- +It is important for the allocator to not make use of fixed addresses. We use +the dynamic base option for the SizeClassAllocator, allowing us to benefit +from the randomness of mmap. + +Usage +===== + +Library +------- +The allocator static library can be built from the LLVM build tree thanks to +the "hardened_allocator" CMake rule. The associated tests can be exercised +thanks to the "check-hardened_allocator" CMake rule. + +Linking the static library to your project will likely require the use of the +"whole-archive" linker flag (or equivalent) as we make use of the +.preinit_array section to initialize the allocator. Additional linker flags can +be required depending on your project. + +Your linked binary should now make use of the Scudo allocation and deallocation +functions. 
+ +Options +------- +Several aspects of the allocator can be configured through environment options, +following the usual ASan options syntax, through the variable SCUDO_OPTIONS. + +The following options are available: + +- quarantine_size_mb (integer, defaults to -1): the size (in Mb) of quarantine + used to delay the actual deallocation of chunks. Lower value may reduce + memory usage but decrease the effectiveness of the mitigation; a negative + value will fallback to a default of 64Mb; + +- alloc_dealloc_mismatch (boolean, defaults to true): whether or not we report + errors on malloc/delete, new/free, new/delete[], etc; + +- new_delete_size_mismatch (boolean, defaults to true): whether or not we + report errors on mismatch between size of new and delete; + +- zero_chunk_contents (boolean, defaults to false): whether or not we zero + chunk contents on allocation and deallocation. + Index: projects/compiler-rt/cmake/config-ix.cmake =================================================================== --- projects/compiler-rt/cmake/config-ix.cmake +++ projects/compiler-rt/cmake/config-ix.cmake @@ -192,6 +192,7 @@ set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64}) set(ALL_ESAN_SUPPORTED_ARCH ${X86_64}) +set(ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH ${X86_64}) if(APPLE) include(CompilerRTDarwinUtils) @@ -378,6 +379,9 @@ list_intersect(ESAN_SUPPORTED_ARCH ALL_ESAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) + list_intersect(HARDENED_ALLOCATOR_SUPPORTED_ARCH + ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH + SANITIZER_COMMON_SUPPORTED_ARCH) else() # Architectures supported by compiler-rt libraries. 
filter_available_targets(SANITIZER_COMMON_SUPPORTED_ARCH @@ -399,6 +403,8 @@ ${ALL_SAFESTACK_SUPPORTED_ARCH}) filter_available_targets(CFI_SUPPORTED_ARCH ${ALL_CFI_SUPPORTED_ARCH}) filter_available_targets(ESAN_SUPPORTED_ARCH ${ALL_ESAN_SUPPORTED_ARCH}) + filter_available_targets(HARDENED_ALLOCATOR_SUPPORTED_ARCH + ${ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH}) endif() if (MSVC) @@ -513,3 +519,11 @@ else() set(COMPILER_RT_HAS_ESAN FALSE) endif() + +if (COMPILER_RT_HAS_SANITIZER_COMMON AND HARDENED_ALLOCATOR_SUPPORTED_ARCH AND + OS_NAME MATCHES "Linux") + set(COMPILER_RT_HAS_HARDENED_ALLOCATOR TRUE) +else() + set(COMPILER_RT_HAS_HARDENED_ALLOCATOR FALSE) +endif() + Index: projects/compiler-rt/lib/CMakeLists.txt =================================================================== --- projects/compiler-rt/lib/CMakeLists.txt +++ projects/compiler-rt/lib/CMakeLists.txt @@ -52,4 +52,8 @@ if(COMPILER_RT_HAS_ESAN) add_subdirectory(esan) endif() + + if(COMPILER_RT_HAS_HARDENED_ALLOCATOR) + add_subdirectory(hardened_allocator) + endif() endif() Index: projects/compiler-rt/lib/hardened_allocator/CMakeLists.txt =================================================================== --- projects/compiler-rt/lib/hardened_allocator/CMakeLists.txt +++ projects/compiler-rt/lib/hardened_allocator/CMakeLists.txt @@ -0,0 +1,32 @@ +add_custom_target(hardened_allocator) + +include_directories(..) 
+ +set(HARDENED_ALLOCATOR_CFLAGS ${SANITIZER_COMMON_CFLAGS}) +append_rtti_flag(OFF HARDENED_ALLOCATOR_CFLAGS) +list(APPEND HARDENED_ALLOCATOR_CFLAGS -msse4.2) + +set(HARDENED_ALLOCATOR_SOURCES + scudo_allocator.cc + scudo_flags.cc + scudo_malloc_linux.cc + scudo_new_delete.cc + scudo_rtl.cc + scudo_utils.cc) + +if(COMPILER_RT_HAS_HARDENED_ALLOCATOR) + foreach(arch ${HARDENED_ALLOCATOR_SUPPORTED_ARCH}) + add_compiler_rt_runtime(clang_rt.hardened_allocator + STATIC + ARCHS ${arch} + SOURCES ${HARDENED_ALLOCATOR_SOURCES} + $ + $ + $ + CFLAGS ${HARDENED_ALLOCATOR_CFLAGS} + PARENT_TARGET hardened_allocator) + endforeach() +endif() + +add_dependencies(compiler-rt hardened_allocator) + Index: projects/compiler-rt/lib/hardened_allocator/scudo_allocator.h =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_allocator.h +++ projects/compiler-rt/lib/hardened_allocator/scudo_allocator.h @@ -0,0 +1,98 @@ +//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_allocator.cc. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_ALLOCATOR_H_ +#define SCUDO_ALLOCATOR_H_ + +#ifndef __x86_64__ +# error "The Scudo hardened allocator currently only supports x86_64." +#endif + +#include "scudo_flags.h" + +#include "sanitizer_common/sanitizer_allocator.h" + +namespace __scudo { + +// We have to redefine CHECK_IMPL, as the __sanitizer one involves calling a +// CheckFailedCallback function, which could be abused by a potential attacker. 
+#ifdef CHECK_IMPL +#undef CHECK_IMPL +#endif + +#define CHECK_IMPL(c1, op, c2) \ + do { \ + __sanitizer::u64 v1 = (u64)(c1); \ + __sanitizer::u64 v2 = (u64)(c2); \ + if (UNLIKELY(!(v1 op v2))) \ + __scudo::CheckFailed(__FILE__, __LINE__, \ + "(" #c1 ") " #op " (" #c2 ")", v1, v2); \ + } while (false) \ +/**/ + +// We will also use our own CheckFailed and Die functions, once again to avoid +// the __sanitizer ones that have callbacks. +void NORETURN +CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); +void NORETURN Die(); + +enum AllocType : u8 { + FromMalloc = 0, // Memory block came from malloc, realloc, calloc, etc. + FromNew = 1, // Memory block came from operator new. + FromNewArray = 2, // Memory block came from operator new []. + FromMemalign = 3, // Memory block came from memalign, posix_memalign, etc. +}; + +struct AllocatorOptions { + u32 QuarantineSizeMb; + u32 ThreadLocalQuarantineSizeKb; + bool MayReturnNull; + bool DeallocationTypeMismatch; + bool DeleteSizeMismatch; + bool ZeroContents; + + void SetFrom(const Flags *f, const CommonFlags *cf); + void CopyTo(Flags *f, CommonFlags *cf) const; +}; + +void InitializeAllocator(const AllocatorOptions &options); +void DrainQuarantine(); + +const uptr AllocatorSpace = ~0ULL; +const uptr AllocatorSize = 0x10000000000ULL; +const uptr MinAlignmentLog = 4; // 16 bytes for x64 +const uptr MaxAlignmentLog = 24; + +typedef DefaultSizeClassMap SizeClassMap; +typedef SizeClassAllocator64 + PrimaryAllocator; +typedef SizeClassAllocatorLocalCache AllocatorCache; +typedef LargeMmapAllocator<> SecondaryAllocator; +typedef CombinedAllocator + ScudoAllocator; + +void *scudoMalloc(uptr Size, AllocType Type); +void scudoFree(void *Ptr, AllocType Type); +void scudoSizedFree(void *Ptr, uptr Size, AllocType Type); +void *scudoRealloc(void *Ptr, uptr Size); +void *scudoCalloc(uptr NMemB, uptr Size); +void *scudoMemalign(uptr Alignment, uptr Size); +void *scudoValloc(uptr Size); +void *scudoPvalloc(uptr 
Size); +int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size); +void *scudoAlignedAlloc(uptr Alignment, uptr Size); +uptr scudoMallocUsableSize(void *Ptr); + +} // namespace __scudo + +#endif // SCUDO_ALLOCATOR_H_ Index: projects/compiler-rt/lib/hardened_allocator/scudo_allocator.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_allocator.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_allocator.cc @@ -0,0 +1,585 @@ +//===-- scudo_allocator.cc --------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo Hardened Allocator implementation. +/// It uses the sanitizer_common allocator as a base and aims at mitigating +/// heap corruption vulnerabilities. It provides a checksum-guarded chunk +/// header, a delayed free list, and additional sanity checks. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" +#include "scudo_utils.h" + +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_quarantine.h" + +#include +#include +#include + +#include + +namespace __scudo { + +void NORETURN Die() { + if (common_flags()->abort_on_error) + Abort(); + internal__exit(common_flags()->exitcode); +} + +void NORETURN CheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + // FIXME: currently using sanitizer's Printf. We might want to use + // something less complex to avoid potential issues. 
+ Printf("CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond, v1, v2); + Die(); +} + +static ScudoAllocator &getAllocator(); + +static thread_local Xorshift128Plus Prng; +// Global static cookie, initialized at start-up. +static u64 Cookie; + +enum ChunkState : u8 { + ChunkAvailable = 0, + ChunkAllocated = 1, + ChunkQuarantine = 2 +}; + +typedef unsigned __int128 PackedHeader; + +// Our header requires 128-bit of storage on x64 (the only platform supported +// as of now), which fits nicely with the alignment requirements. +// Having the offset saves us from using functions such as GetBlockBegin, that +// is fairly costly. Our first implementation used the MetaData as well, which +// offers the advantage of being stored away from the chunk itself, but +// accessing it was costly as well. +// The header will be atomically loaded and stored using the 16-byte primitives +// offered by the platform (likely requires cmpxchg16b support). +struct UnpackedHeader { + // 1st 8 bytes + u16 checksum : 16; + u64 requested_size : 40; // Needed for reallocation purposes. + u8 state : 2; // available, allocated, or quarantined + u8 alloc_type : 2; // malloc, new, new[], or memalign + u8 unused_0_ : 4; + // 2nd 8 bytes + u64 offset : 20; // Offset from the beginning of the backend + // allocation to the beginning of the chunk + // itself, in multiples of MinAlignment. See + // comment about its maximum value and test in + // Initialize. + u64 unused_1_ : 28; + u16 salt : 16; +}; + +COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader)); + +const uptr ChunkHeaderSize = sizeof(PackedHeader); + +struct ScudoChunk : UnpackedHeader { + // We can't use the offset member of the chunk itself, as we would double + // fetch it without any guarantee that it hasn't been tampered with. To + // prevent this, we work with a local copy of the header. 
+ void *AllocBeg(UnpackedHeader *Header) { + return reinterpret_cast( + reinterpret_cast(this) - (Header->offset << MinAlignmentLog)); + } + + // CRC32 checksum of the Chunk pointer and its ChunkHeader. + // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction. + u16 Checksum(UnpackedHeader *Header) const { + u64 HeaderHolder[2]; + memcpy(HeaderHolder, Header, sizeof(HeaderHolder)); + u64 Crc = _mm_crc32_u64(Cookie, reinterpret_cast(this)); + // This is somewhat of a shortcut. The checksum is stored in the 16 least + // significant bits of the first 8 bytes of the header, hence zero-ing + // those bits out. It would be more valid to zero the checksum field of the + // UnpackedHeader, but would require holding an additional copy of it. + Crc = _mm_crc32_u64(Crc, HeaderHolder[0] & 0xffffffffffff0000ULL); + Crc = _mm_crc32_u64(Crc, HeaderHolder[1]); + return static_cast(Crc); + } + + // Loads and unpacks the header, verifying the checksum in the process. + void loadHeader(UnpackedHeader *unpacked_header) const { + PackedHeader packed_header; + __atomic_load(reinterpret_cast(this), &packed_header, + __ATOMIC_RELAXED); + *unpacked_header = bit_cast(packed_header); + if (unpacked_header->checksum != Checksum(unpacked_header)) { + Printf("ERROR: corrupted chunk header at address %p\n", this); + Die(); + } + } + + // Packs and stores the header, computing the checksum in the process. + void storeHeader(UnpackedHeader *new_unpacked_header) { + new_unpacked_header->checksum = Checksum(new_unpacked_header); + PackedHeader new_packed_header = + bit_cast(*new_unpacked_header); + __atomic_store(reinterpret_cast(this), &new_packed_header, + __ATOMIC_RELAXED); + } + + // Packs and stores the header, computing the checksum in the process. We + // compare the current header with the expected provided one to ensure that + // we are not being raced by a corruption occurring in another thread. 
+ void compareExchangeHeader(UnpackedHeader *new_unpacked_header, + UnpackedHeader *old_unpacked_header) { + new_unpacked_header->checksum = Checksum(new_unpacked_header); + PackedHeader new_packed_header = + bit_cast(*new_unpacked_header); + PackedHeader old_packed_header = + bit_cast(*old_unpacked_header); + if (!__atomic_compare_exchange(reinterpret_cast(this), + &old_packed_header, + &new_packed_header, + false, + __ATOMIC_RELAXED, + __ATOMIC_RELAXED)) { + Printf("ERROR: race on chunk header at address %p\n", this); + Die(); + } + } +}; + +static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT; +static thread_local bool ThreadInited; +static pthread_key_t pkey; +static thread_local AllocatorCache cache; + +static void teardownThread(void *p) { + uptr v = reinterpret_cast(p); + // The glibc POSIX thread-local-storage deallocation routine calls user + // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. + // We want to be called last since other destructors might call free and the + // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the + // quarantine and swallowing the cache. + if (v < PTHREAD_DESTRUCTOR_ITERATIONS) { + pthread_setspecific(pkey, reinterpret_cast(v + 1)); + return; + } + DrainQuarantine(); + getAllocator().DestroyCache(&cache); +} + +static void initGlobal() { + pthread_key_create(&pkey, teardownThread); +} + +static void NOINLINE initThread() { + pthread_once(&GlobalInited, initGlobal); + pthread_setspecific(pkey, reinterpret_cast(1)); + getAllocator().InitCache(&cache); + ThreadInited = true; +} + +struct QuarantineCallback { + explicit QuarantineCallback(AllocatorCache *cache) + : cache_(cache) {} + + // Chunk recycling function, returns a quarantined chunk to the backend. 
+ void Recycle(ScudoChunk *chunk) { + UnpackedHeader header; + chunk->loadHeader(&header); + if (header.state != ChunkQuarantine) { + Printf("ERROR: invalid chunk state when recycling address %p\n", + chunk); + Die(); + } + void *ptr = chunk->AllocBeg(&header); + getAllocator().Deallocate(cache_, ptr); + } + + /// Internal quarantine allocation and deallocation functions. + void *Allocate(uptr size) { + // The internal quarantine memory cannot be protected by us. But the only + // structures allocated are QuarantineBatch, that are 8KB for x64. So we + // will use mmap for those, and given that Deallocate doesn't pass a size + // in, we enforce the size of the allocation to be sizeof(QuarantineBatch). + // TODO(kostyak): switching to mmap impacts greatly performances, we have + // to find another solution + // CHECK_EQ(size, sizeof(QuarantineBatch)); + // return MmapOrDie(size, "QuarantineBatch"); + return getAllocator().Allocate(cache_, size, 1, false); + } + + void Deallocate(void *ptr) { + // UnmapOrDie(ptr, sizeof(QuarantineBatch)); + getAllocator().Deallocate(cache_, ptr); + } + + AllocatorCache *cache_; +}; + +typedef Quarantine ScudoQuarantine; +typedef ScudoQuarantine::Cache QuarantineCache; +static thread_local QuarantineCache quarantine_cache; + +void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) { + MayReturnNull = cf->allocator_may_return_null; + QuarantineSizeMb = f->QuarantineSizeMb; + ThreadLocalQuarantineSizeKb = f->ThreadLocalQuarantineSizeKb; + DeallocationTypeMismatch = f->DeallocationTypeMismatch; + DeleteSizeMismatch = f->DeleteSizeMismatch; + ZeroContents = f->ZeroContents; +} + +void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) const { + cf->allocator_may_return_null = MayReturnNull; + f->QuarantineSizeMb = QuarantineSizeMb; + f->ThreadLocalQuarantineSizeKb = ThreadLocalQuarantineSizeKb; + f->DeallocationTypeMismatch = DeallocationTypeMismatch; + f->DeleteSizeMismatch = DeleteSizeMismatch; + f->ZeroContents = 
ZeroContents; +} + +struct Allocator { + static const uptr MaxAllowedMallocSize = 1ULL << 40; + static const uptr MinAlignment = 1 << MinAlignmentLog; + static const uptr MaxAlignment = 1 << MaxAlignmentLog; // 16 MB + + ScudoAllocator allocator; + ScudoQuarantine quarantine; + + bool DeallocationTypeMismatch; + bool ZeroContents; + bool DeleteSizeMismatch; + + explicit Allocator(LinkerInitialized) + : quarantine(LINKER_INITIALIZED) {} + + void Initialize(const AllocatorOptions &options) { + // Currently SSE 4.2 support is required. This might change later. + CHECK(testCPUFeature(SSE4_2)); // for crc32 + + // Verify that the header offset field can hold the maximum offset. In the + // worst case scenario, the backend allocation is already aligned on + // MaxAlignment, so in order to store the header and still be aligned, we + // add an extra MaxAlignment. As a result, the offset from the beginning of + // the backend allocation to the chunk will be MaxAlignment - + // ChunkHeaderSize. + UnpackedHeader Header = {}; + uptr MaximumOffset = (MaxAlignment - ChunkHeaderSize) >> MinAlignmentLog; + Header.offset = MaximumOffset; + if (Header.offset != MaximumOffset) { + Printf("ERROR: the maximum possible offset doesn't fit in the header\n"); + Die(); + } + + DeallocationTypeMismatch = options.DeallocationTypeMismatch; + DeleteSizeMismatch = options.DeleteSizeMismatch; + ZeroContents = options.ZeroContents; + allocator.Init(options.MayReturnNull); + quarantine.Init(static_cast(options.QuarantineSizeMb) << 20, + static_cast( + options.ThreadLocalQuarantineSizeKb) << 10); + Cookie = Prng.Next(); + } + + // Allocates a chunk. 
+ void *Allocate(uptr size, uptr alignment, AllocType alloc_type) { + if (UNLIKELY(!ThreadInited)) + initThread(); + if (!IsPowerOfTwo(alignment)) { + Printf("ERROR: malloc alignment is not a power of 2\n"); + Die(); + } + if (alignment > MaxAlignment) + return allocator.ReturnNullOrDie(); + if (alignment < MinAlignment) + alignment = MinAlignment; + if (size == 0) + size = 1; + if (size >= MaxAllowedMallocSize) + return allocator.ReturnNullOrDie(); + uptr rounded_size = RoundUpTo(size, MinAlignment); + uptr extra_bytes = ChunkHeaderSize; + if (alignment > MinAlignment) + extra_bytes += alignment; + uptr needed_size = rounded_size + extra_bytes; + if (needed_size >= MaxAllowedMallocSize) + return allocator.ReturnNullOrDie(); + void *ptr = allocator.Allocate(&cache, needed_size, MinAlignment); + if (!ptr) + return allocator.ReturnNullOrDie(); + + uptr alloc_beg = reinterpret_cast(ptr); + uptr chunk_beg = alloc_beg + ChunkHeaderSize; + if (!IsAligned(chunk_beg, alignment)) + chunk_beg = RoundUpTo(chunk_beg, alignment); + CHECK_LE(chunk_beg + size, alloc_beg + needed_size); + ScudoChunk *chunk = + reinterpret_cast(chunk_beg - ChunkHeaderSize); + UnpackedHeader header = {}; + header.state = ChunkAllocated; + header.offset = (chunk_beg - ChunkHeaderSize - alloc_beg) + >> MinAlignmentLog; + header.alloc_type = alloc_type; + header.requested_size = size; + header.salt = static_cast(Prng.Next()); + chunk->storeHeader(&header); + void *user_ptr = reinterpret_cast(chunk_beg); + if (ZeroContents && allocator.FromPrimary(ptr)) + memset(user_ptr, 0, size); + // TODO(kostyak): hooks sound like a terrible idea security wise but might + // be needed for things to work properly? + // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(user_ptr, size); + return user_ptr; + } + + // Deallocates a Chunk, which means adding it to the delayed free list (or + // Quarantine). 
+ void Deallocate(void *user_ptr, uptr delete_size, AllocType alloc_type) { + if (UNLIKELY(!ThreadInited)) + initThread(); + // TODO(kostyak): see hook comment above + // if (&__sanitizer_free_hook) __sanitizer_free_hook(user_ptr); + if (!user_ptr) + return; + uptr chunk_beg = reinterpret_cast(user_ptr); + if (!IsAligned(chunk_beg, MinAlignment)) { + Printf("ERROR: attempted to deallocate a chunk not properly aligned at " + "address %p\n", user_ptr); + Die(); + } + ScudoChunk *chunk = + reinterpret_cast(chunk_beg - ChunkHeaderSize); + UnpackedHeader old_header; + chunk->loadHeader(&old_header); + if (old_header.state != ChunkAllocated) { + Printf("ERROR: invalid chunk state when deallocating address %p\n", + chunk); + Die(); + } + UnpackedHeader new_header = old_header; + new_header.state = ChunkQuarantine; + chunk->compareExchangeHeader(&new_header, &old_header); + if (DeallocationTypeMismatch) { + // The deallocation type has to match the allocation one + if (new_header.alloc_type != alloc_type) { + // With the exception of memalign'd Chunks, that can be still be free'd + if (new_header.alloc_type != FromMemalign || alloc_type != FromMalloc) { + Printf("ERROR: allocation type mismatch on address %p\n", chunk); + Die(); + } + } + } + uptr size = new_header.requested_size; + if (DeleteSizeMismatch) { + if (delete_size && delete_size != size) { + Printf("ERROR: invalid sized delete on chunk at address %p\n", chunk); + Die(); + } + } + quarantine.Put(&quarantine_cache, QuarantineCallback(&cache), chunk, size); + } + + // Returns the actual usable size of a chunk. Since this requires loading the + // header, we will return it in the second parameter, as it can be required + // by the caller to perform additional processing. 
+ uptr UsableSize(const void *ptr, UnpackedHeader *header) { + if (UNLIKELY(!ThreadInited)) + initThread(); + if (!ptr) + return 0; + uptr chunk_beg = reinterpret_cast(ptr); + ScudoChunk *chunk = + reinterpret_cast(chunk_beg - ChunkHeaderSize); + chunk->loadHeader(header); + // Getting the usable size of a chunk only makes sense if it's allocated. + if (header->state != ChunkAllocated) { + Printf("ERROR: attempted to size a non-allocated chunk at address %p\n", + chunk); + Die(); + } + uptr size = allocator.GetActuallyAllocatedSize(chunk->AllocBeg(header)); + // UsableSize works as malloc_usable_size, which is also what (AFAIU) + // tcmalloc's MallocExtension::GetAllocatedSize aims at providing. This + // means we will return the size of the chunk from the user beginning to + // the end of the 'user' allocation, hence us subtracting the header size + // and the offset from the size. + if (size == 0) + return size; + return size - ChunkHeaderSize - (header->offset << MinAlignmentLog); + } + + // Helper function that doesn't care about the header. + uptr UsableSize(const void *Ptr) { + UnpackedHeader Header; + return UsableSize(Ptr, &Header); + } + + // Reallocates a chunk. We can save on a new allocation if the new requested + // size still fits in the chunk. + void *Reallocate(void *old_ptr, uptr new_size) { + if (UNLIKELY(!ThreadInited)) + initThread(); + UnpackedHeader old_header; + uptr usable_size = UsableSize(old_ptr, &old_header); + uptr chunk_beg = reinterpret_cast(old_ptr); + ScudoChunk *chunk = + reinterpret_cast(chunk_beg - ChunkHeaderSize); + if (old_header.alloc_type != FromMalloc) { + Printf("ERROR: invalid chunk type when reallocating address %p\n", + chunk); + Die(); + } + UnpackedHeader new_header = old_header; + // The new size still fits in the current chunk. 
+ if (new_size <= usable_size) { + // TODO(kostyak): zero the additional contents + new_header.requested_size = new_size; + chunk->compareExchangeHeader(&new_header, &old_header); + return old_ptr; + } + // Otherwise, we have to allocate a new chunk and copy the contents of the + // old one. + void *new_ptr = Allocate(new_size, MinAlignment, FromMalloc); + if (new_ptr) { + uptr old_size = old_header.requested_size; + memcpy(new_ptr, old_ptr, Min(new_size, old_size)); + new_header.state = ChunkQuarantine; + chunk->compareExchangeHeader(&new_header, &old_header); + quarantine.Put(&quarantine_cache, QuarantineCallback(&cache), chunk, + old_size); + } + return new_ptr; + } + + void *Calloc(uptr NMemB, uptr Size) { + uptr Total = NMemB * Size; + if (Size != 0 && Total / Size != NMemB) // Overflow check + return allocator.ReturnNullOrDie(); + void *Ptr = Allocate(Total, MinAlignment, FromMalloc); + // If ZeroContents, the content of the chunk has already been zero'd out. + if (!ZeroContents && Ptr && allocator.FromPrimary(Ptr)) + memset(Ptr, 0, UsableSize(Ptr)); + return Ptr; + } + + void DrainQuarantine() { + quarantine.Drain(&quarantine_cache, QuarantineCallback(&cache)); + } +}; + +static Allocator instance(LINKER_INITIALIZED); + +static ScudoAllocator &getAllocator() { + return instance.allocator; +} + +void InitializeAllocator(const AllocatorOptions &options) { + instance.Initialize(options); +} + +void DrainQuarantine() { + instance.DrainQuarantine(); +} + +void *scudoMalloc(uptr Size, AllocType Type) { + return instance.Allocate(Size, Allocator::MinAlignment, Type); +} + +void scudoFree(void *Ptr, AllocType Type) { + instance.Deallocate(Ptr, 0, Type); +} + +void scudoSizedFree(void *Ptr, uptr Size, AllocType Type) { + instance.Deallocate(Ptr, Size, Type); +} + +void *scudoRealloc(void *Ptr, uptr Size) { + if (!Ptr) + return instance.Allocate(Size, Allocator::MinAlignment, FromMalloc); + if (Size == 0) { + instance.Deallocate(Ptr, 0, FromMalloc); + return nullptr; 
+ } + return instance.Reallocate(Ptr, Size); +} + +void *scudoCalloc(uptr NMemB, uptr Size) { + return instance.Calloc(NMemB, Size); +} + +void *scudoValloc(uptr Size) { + return instance.Allocate(Size, GetPageSizeCached(), FromMemalign); +} + +void *scudoMemalign(uptr Alignment, uptr Size) { + return instance.Allocate(Size, Alignment, FromMemalign); +} + +void *scudoPvalloc(uptr Size) { + uptr PageSize = GetPageSizeCached(); + Size = RoundUpTo(Size, PageSize); + if (Size == 0) { + // pvalloc(0) should allocate one page. + Size = PageSize; + } + return instance.Allocate(Size, PageSize, FromMemalign); +} + +int scudoPosixMemalign(void **MemPtr, uptr Alignment, uptr Size) { + *MemPtr = instance.Allocate(Size, Alignment, FromMemalign); + return 0; +} + +void *scudoAlignedAlloc(uptr Alignment, uptr Size) { + // size must be a multiple of the alignment. To avoid a division, we first + // make sure that alignment is a power of 2. + CHECK(IsPowerOfTwo(Alignment)); + CHECK_EQ((Size & (Alignment - 1)), 0); + return instance.Allocate(Size, Alignment, FromMalloc); +} + +uptr scudoMallocUsableSize(void *Ptr) { + return instance.UsableSize(Ptr); +} + +} // namespace __scudo + +using namespace __scudo; + +// MallocExtension helper functions + +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + getAllocator().GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + getAllocator().GetStats(stats); + return stats[AllocatorStatMapped]; +} + +uptr __sanitizer_get_free_bytes() { + return 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 1; +} + +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + return instance.UsableSize(p) != 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + return instance.UsableSize(p); +} Index: projects/compiler-rt/lib/hardened_allocator/scudo_flags.h 
=================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_flags.h +++ projects/compiler-rt/lib/hardened_allocator/scudo_flags.h @@ -0,0 +1,33 @@ +//===-- scudo_flags.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_flags.cc. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_FLAGS_H_ +#define SCUDO_FLAGS_H_ + +namespace __scudo { + +struct Flags { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "scudo_flags.inc" +#undef SCUDO_FLAG + + void SetDefaults(); +}; + +Flags *flags(); + +void InitializeFlags(); + +} // namespace __scudo + +#endif // SCUDO_FLAGS_H_ Index: projects/compiler-rt/lib/hardened_allocator/scudo_flags.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_flags.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_flags.cc @@ -0,0 +1,81 @@ +//===-- scudo_flags.cc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Hardened Allocator flag parsing logic. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_flags.h" + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" + +namespace __scudo { + +Flags scudo_flags_dont_use_directly; // use via flags(). 
+ +void Flags::SetDefaults() { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "scudo_flags.inc" +#undef SCUDO_FLAG +} + +static void RegisterScudoFlags(FlagParser *parser, Flags *f) { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "scudo_flags.inc" +#undef SCUDO_FLAG +} + +void InitializeFlags() { + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser scudo_parser; + RegisterScudoFlags(&scudo_parser, f); + RegisterCommonFlags(&scudo_parser); + + scudo_parser.ParseString(GetEnv("SCUDO_OPTIONS")); + + InitializeCommonFlags(); + + // Sanity checks and default settings for the Quarantine parameters. + + if (f->QuarantineSizeMb < 0) { + const int DefaultQuarantineSizeMb = 64; + f->QuarantineSizeMb = DefaultQuarantineSizeMb; + } + // We enforce an upper limit for the quarantine size of 4Gb. + if (f->QuarantineSizeMb > (4 * 1024)) { + Printf("ERROR: the quarantine size is too large\n"); + Die(); + } + if (f->ThreadLocalQuarantineSizeKb < 0) { + const int DefaultThreadLocalQuarantineSizeKb = 1024; + f->ThreadLocalQuarantineSizeKb = DefaultThreadLocalQuarantineSizeKb; + } + // And an upper limit of 128Mb for the thread quarantine cache. 
+ if (f->ThreadLocalQuarantineSizeKb > (128 * 1024)) { + Printf("ERROR: the per thread quarantine cache size is too large\n"); + Die(); + } +} + +Flags *flags() { + return &scudo_flags_dont_use_directly; +} + +} Index: projects/compiler-rt/lib/hardened_allocator/scudo_flags.inc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_flags.inc +++ projects/compiler-rt/lib/hardened_allocator/scudo_flags.inc @@ -0,0 +1,35 @@ +//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Hardened Allocator runtime flags. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_FLAG +# error "Define SCUDO_FLAG prior to including this file!" +#endif + +SCUDO_FLAG(int, QuarantineSizeMb, 64, + "Size (in Mb) of quarantine used to delay the actual deallocation " + "of chunks. Lower value may reduce memory usage but decrease the " + "effectiveness of the mitigation.") + +SCUDO_FLAG(int, ThreadLocalQuarantineSizeKb, 1024, + "Size (in Kb) of per-thread cache used to offload the global " + "quarantine. 
Lower value may reduce memory usage but might increase " + "the contention on the global quarantine.") + +SCUDO_FLAG(bool, DeallocationTypeMismatch, true, + "Report errors on malloc/delete, new/free, new/delete[], etc.") + +SCUDO_FLAG(bool, DeleteSizeMismatch, true, + "Report errors on mismatch between size of new and delete.") + +SCUDO_FLAG(bool, ZeroContents, false, + "Zero chunk contents on allocation and deallocation.") Index: projects/compiler-rt/lib/hardened_allocator/scudo_malloc_linux.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_malloc_linux.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_malloc_linux.cc @@ -0,0 +1,75 @@ +//===-- scudo_malloc_linux.cc -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Linux specific malloc interception functions. 
+/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "scudo_allocator.h" + +#include "interception/interception.h" + +using namespace __scudo; + +INTERCEPTOR(void, free, void *ptr) { + scudoFree(ptr, FromMalloc); +} + +INTERCEPTOR(void, cfree, void *ptr) { + scudoFree(ptr, FromMalloc); +} + +INTERCEPTOR(void*, malloc, uptr size) { + return scudoMalloc(size, FromMalloc); +} + +INTERCEPTOR(void*, realloc, void *ptr, uptr size) { + return scudoRealloc(ptr, size); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + return scudoCalloc(nmemb, size); +} + +INTERCEPTOR(void*, valloc, uptr size) { + return scudoValloc(size); +} + +INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { + return scudoMemalign(alignment, size); +} + +INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) { + return scudoMemalign(alignment, size); +} + +INTERCEPTOR(void*, pvalloc, uptr size) { + return scudoPvalloc(size); +} + +INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { + return scudoAlignedAlloc(alignment, size); +} + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + return scudoPosixMemalign(memptr, alignment, size); +} + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + return scudoMallocUsableSize(ptr); +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return -1; +} + +#endif // SANITIZER_LINUX Index: projects/compiler-rt/lib/hardened_allocator/scudo_new_delete.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_new_delete.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_new_delete.cc @@ -0,0 +1,69 @@ +//===-- scudo_new_delete.cc -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. 
See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Interceptors for operators new and delete. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" + +#include "interception/interception.h" + +#include <stddef.h> + +using namespace __scudo; + +#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE + +// Fake std::nothrow_t to avoid including <new>. +namespace std { +struct nothrow_t {}; +} // namespace std + +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) { + return scudoMalloc(size, FromNew); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) { + return scudoMalloc(size, FromNewArray); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) { + return scudoMalloc(size, FromNew); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) { + return scudoMalloc(size, FromNewArray); +} + +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT { + return scudoFree(ptr, FromNew); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT { + return scudoFree(ptr, FromNewArray); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT { + return scudoFree(ptr, FromNew); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT { + return scudoFree(ptr, FromNewArray); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT { + scudoSizedFree(ptr, size, FromNew); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT { + scudoSizedFree(ptr, size, FromNewArray); +} Index: projects/compiler-rt/lib/hardened_allocator/scudo_rtl.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_rtl.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_rtl.cc @@ -0,0 +1,51 @@ 
+//===-- scudo_rtl.cc --------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Main file for the Hardened Allocator runtime library. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" + +namespace __scudo { + +bool scudo_inited; +bool scudo_init_is_running; + +static void ScudoInitInternal() { + if (LIKELY(scudo_inited)) + return; + SanitizerToolName = "Scudo"; + CHECK(!scudo_init_is_running && "Scudo init calls itself!"); + scudo_init_is_running = true; + + InitializeFlags(); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); + + scudo_inited = true; + scudo_init_is_running = false; +} + +} // namespace __scudo + +using namespace __scudo; + +void __scudo_init() { + ScudoInitInternal(); +} + +#if SANITIZER_CAN_USE_PREINIT_ARRAY +__attribute__((section(".preinit_array"), used)) + void (*__local_scudo_preinit)(void) = __scudo_init; +#else +#error "Can't use .preinit_array" +#endif Index: projects/compiler-rt/lib/hardened_allocator/scudo_utils.h =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_utils.h +++ projects/compiler-rt/lib/hardened_allocator/scudo_utils.h @@ -0,0 +1,58 @@ +//===-- scudo_utils.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_utils.cc. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_UTILS_H_ +#define SCUDO_UTILS_H_ + +#include <string.h> + +#include "sanitizer_common/sanitizer_common.h" + +namespace __scudo { + +template <typename Dest, typename Source> +inline Dest bit_cast(const Source& source) { + static_assert(sizeof(Dest) == sizeof(Source), "Sizes are not equal!"); + Dest dest; + memcpy(&dest, &source, sizeof(dest)); + return dest; +} + +enum CPUFeature { + SSE4_2 = 0, + RDRAND = 1, + ENUM_CPUFEATURE_MAX +}; +bool testCPUFeature(CPUFeature feature); + +// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B +// The state (128 bits) will be stored in thread local storage. +struct Xorshift128Plus { + public: + Xorshift128Plus(); + u64 Next() { + u64 x = state_0_; + const u64 y = state_1_; + state_0_ = y; + x ^= x << 23; + state_1_ = x ^ y ^ (x >> 17) ^ (y >> 26); + return state_1_ + y; + } + private: + u64 state_0_; + u64 state_1_; +}; + +} // namespace __scudo + +#endif // SCUDO_UTILS_H_ Index: projects/compiler-rt/lib/hardened_allocator/scudo_utils.cc =================================================================== --- projects/compiler-rt/lib/hardened_allocator/scudo_utils.cc +++ projects/compiler-rt/lib/hardened_allocator/scudo_utils.cc @@ -0,0 +1,121 @@ +//===-- scudo_utils.cc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Platform specific utility functions. 
+/// +//===----------------------------------------------------------------------===// + +#include "scudo_utils.h" + +#include <string.h> +#include <chrono> // for std::chrono::high_resolution_clock +#include <functional> // for std::hash +#include <thread> // for std::this_thread + +namespace __scudo { + +typedef struct { + u32 eax; + u32 ebx; + u32 ecx; + u32 edx; +} CPUIDInfo; + +static void getCPUID(CPUIDInfo *info, u32 leaf, u32 subleaf) +{ + asm volatile("cpuid" + : "=a" (info->eax), "=b" (info->ebx), "=c" (info->ecx), "=d" (info->edx) + : "a" (leaf), "c" (subleaf) + ); +} + +// Returns true if the CPU is a "GenuineIntel" or "AuthenticAMD" +static bool isSupportedCPU() +{ + CPUIDInfo Info; + + getCPUID(&Info, 0, 0); + if (memcmp(reinterpret_cast<const char *>(&Info.ebx), "Genu", 4) == 0 && + memcmp(reinterpret_cast<const char *>(&Info.edx), "ineI", 4) == 0 && + memcmp(reinterpret_cast<const char *>(&Info.ecx), "ntel", 4) == 0) { + return true; + } + if (memcmp(reinterpret_cast<const char *>(&Info.ebx), "Auth", 4) == 0 && + memcmp(reinterpret_cast<const char *>(&Info.edx), "enti", 4) == 0 && + memcmp(reinterpret_cast<const char *>(&Info.ecx), "cAMD", 4) == 0) { + return true; + } + return false; +} + +bool testCPUFeature(CPUFeature feature) +{ + static bool InfoInitialized = false; + static CPUIDInfo kCPUInfo = {}; + + if (InfoInitialized == false) { + if (isSupportedCPU() == true) + getCPUID(&kCPUInfo, 1, 0); + else + UNIMPLEMENTED(); + InfoInitialized = true; + } + switch (feature) { + case SSE4_2: + return ((kCPUInfo.ecx >> 20) & 0x1) != 0; + case RDRAND: + return ((kCPUInfo.ecx >> 30) & 0x1) != 0; + default: + break; + } + return false; +} + +static u64 getRdTSC() { + // Clang: __builtin_readcyclecounter + u64 low, high; + __asm__ volatile("rdtsc" : "=a" (low), "=d" (high)); + return (high << 32) | low; +} + +// RdRand64 will call rdrand if the feature is available for the CPU, otherwise +// it will use a XOR of the cycle counter, the high resolution clock and the +// thread ID hash. 
+static u64 RdRand64() { + static s8 HasRdRand = -1; + if (HasRdRand == -1) { + HasRdRand = testCPUFeature(RDRAND); + } + if (HasRdRand == 1) { + u64 rnd; + u8 carry; + + // Normally we need only one execution, but if the first attempt failed, + // we fall back to retries. + for (int c = 10; c != 0; --c) { + asm volatile("rdrand %0; setc %1": "=r" (rnd), "=qm" (carry)); + if (carry != 0) + return rnd; // Success + } + + // All attempts failed. + Printf("WARNING: RDRAND failed. Falling back.\n"); + } + std::hash<std::thread::id> hasher; + return getRdTSC() ^ hasher(std::this_thread::get_id()) ^ + std::chrono::high_resolution_clock::now().time_since_epoch().count(); +} + +// Default constructor for Xorshift128Plus seeds the state with RdRand64 +Xorshift128Plus::Xorshift128Plus() { + state_0_ = RdRand64(); + state_1_ = RdRand64(); +} + +} // namespace __scudo Index: projects/compiler-rt/test/CMakeLists.txt =================================================================== --- projects/compiler-rt/test/CMakeLists.txt +++ projects/compiler-rt/test/CMakeLists.txt @@ -73,6 +73,9 @@ if(COMPILER_RT_HAS_ESAN) add_subdirectory(esan) endif() + if(COMPILER_RT_HAS_HARDENED_ALLOCATOR) + add_subdirectory(hardened_allocator) + endif() endif() if(COMPILER_RT_STANDALONE_BUILD) Index: projects/compiler-rt/test/hardened_allocator/CMakeLists.txt =================================================================== --- projects/compiler-rt/test/hardened_allocator/CMakeLists.txt +++ projects/compiler-rt/test/hardened_allocator/CMakeLists.txt @@ -0,0 +1,21 @@ +set(HARDENED_ALLOCATOR_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR}) +set(HARDENED_ALLOCATOR_LIT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR}) + + +set(HARDENED_ALLOCATOR_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS}) +if(NOT COMPILER_RT_STANDALONE_BUILD) + list(APPEND HARDENED_ALLOCATOR_TEST_DEPS hardened_allocator) +endif() + +configure_lit_site_cfg( + ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in + ${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg + ) + 
+add_lit_testsuite(check-hardened_allocator + "Running the Hardened Allocator tests" + ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${HARDENED_ALLOCATOR_TEST_DEPS}) +set_target_properties(check-hardened_allocator PROPERTIES FOLDER + "Hardened Allocator tests") + Index: projects/compiler-rt/test/hardened_allocator/alignment.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/alignment.cc +++ projects/compiler-rt/test/hardened_allocator/alignment.cc @@ -0,0 +1,25 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t pointers 2>&1 | FileCheck %s + +// Tests that a non-16-byte aligned pointer will trigger the associated error +// on deallocation. + +#include <assert.h> +#include <malloc.h> +#include <stdint.h> +#include <stdlib.h> +#include <string.h> + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "pointers")) { + void *p = malloc(1U << 16); + if (!p) + return 1; + free(reinterpret_cast<void *>(reinterpret_cast<uintptr_t>(p) | 8)); + } + return 0; +} + +// CHECK: ERROR: attempted to deallocate a chunk not properly aligned Index: projects/compiler-rt/test/hardened_allocator/double-free.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/double-free.cc +++ projects/compiler-rt/test/hardened_allocator/double-free.cc @@ -0,0 +1,49 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t malloc 2>&1 | FileCheck %s +// RUN: not %run %t new 2>&1 | FileCheck %s +// RUN: not %run %t newarray 2>&1 | FileCheck %s +// RUN: not %run %t memalign 2>&1 | FileCheck %s + +// Tests double-free error on pointers allocated with different allocation +// functions. 
+ +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "malloc")) { + void *p = malloc(sizeof(int)); + if (!p) + return 1; + free(p); + free(p); + } + if (!strcmp(argv[1], "new")) { + int *p = new int; + if (!p) + return 1; + delete p; + delete p; + } + if (!strcmp(argv[1], "newarray")) { + int *p = new int[8]; + if (!p) + return 1; + delete[] p; + delete[] p; + } + if (!strcmp(argv[1], "memalign")) { + void *p = nullptr; + posix_memalign(&p, 0x100, sizeof(int)); + if (!p) + return 1; + free(p); + free(p); + } + return 0; +} + +// CHECK: ERROR: invalid chunk state when deallocating address Index: projects/compiler-rt/test/hardened_allocator/lit.cfg =================================================================== --- projects/compiler-rt/test/hardened_allocator/lit.cfg +++ projects/compiler-rt/test/hardened_allocator/lit.cfg @@ -0,0 +1,39 @@ +# -*- Python -*- + +import os + +# Setup config name. +config.name = 'Hardened Allocator' + +# Setup source root. +config.test_source_root = os.path.dirname(__file__) + +# Path to the static library +base_lib = os.path.join(config.compiler_rt_libdir, + "libclang_rt.hardened_allocator-%s.a" % config.target_arch) +whole_archive = "-Wl,-whole-archive %s -Wl,-no-whole-archive " % base_lib + +# Test suffixes. +config.suffixes = ['.c', '.cc', '.cpp', '.m', '.mm', '.ll', '.test'] + +# C flags. +c_flags = ["-std=c++11", + "-lstdc++", + "-ldl", + "-lrt", + "-pthread", + "-latomic", #for __atomic_load_16, __atomic_store_16, __atomic_compare_exchange_16 + "-fPIE", + "-pie", + "-O0"] + +def build_invocation(compile_flags): + return " " + " ".join([config.clang] + compile_flags) + " " + +# Add clang substitutions. +config.substitutions.append( ("%clang_scudo ", + build_invocation(c_flags) + whole_archive ) ) + +# Hardened Allocator tests are currently supported on Linux only. 
+if config.host_os not in ['Linux']: + config.unsupported = True Index: projects/compiler-rt/test/hardened_allocator/lit.site.cfg.in =================================================================== --- projects/compiler-rt/test/hardened_allocator/lit.site.cfg.in +++ projects/compiler-rt/test/hardened_allocator/lit.site.cfg.in @@ -0,0 +1,7 @@ +@LIT_SITE_CFG_IN_HEADER@ + +# Load common config for all compiler-rt lit tests. +lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") + +# Load tool-specific config that would do the real work. +lit_config.load_config(config, "@HARDENED_ALLOCATOR_LIT_SOURCE_DIR@/lit.cfg") Index: projects/compiler-rt/test/hardened_allocator/malloc.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/malloc.cc +++ projects/compiler-rt/test/hardened_allocator/malloc.cc @@ -0,0 +1,27 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t 2>&1 + +// Tests that a regular workflow of allocation, memory fill and free works as +// intended. Also tests that a zero-sized allocation succeeds. + +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p; + size_t size = 1U << 8; + + p = malloc(size); + if (!p) + return 1; + memset(p, 'A', size); + free(p); + p = malloc(0); + if (!p) + return 1; + free(p); + + return 0; +} Index: projects/compiler-rt/test/hardened_allocator/memalign.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/memalign.cc +++ projects/compiler-rt/test/hardened_allocator/memalign.cc @@ -0,0 +1,42 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t valid 2>&1 +// RUN: not %run %t invalid 2>&1 | FileCheck %s + +// Tests that the various aligned allocation functions work as intended. Also +// tests for the condition where the alignment is not a power of 2. 
+ +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p; + size_t alignment = 1U << 12; + size_t size = alignment; + + assert(argc == 2); + if (!strcmp(argv[1], "valid")) { + p = memalign(alignment, size); + if (!p) + return 1; + free(p); + p = nullptr; + posix_memalign(&p, alignment, size); + if (!p) + return 1; + free(p); + p = aligned_alloc(alignment, size); + if (!p) + return 1; + free(p); + } + if (!strcmp(argv[1], "invalid")) { + p = memalign(alignment - 1, size); + free(p); + } + return 0; +} + +// CHECK: ERROR: malloc alignment is not a power of 2 Index: projects/compiler-rt/test/hardened_allocator/mismatch.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/mismatch.cc +++ projects/compiler-rt/test/hardened_allocator/mismatch.cc @@ -0,0 +1,41 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t mallocdel 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0 %run %t mallocdel 2>&1 +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t newfree 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0 %run %t newfree 2>&1 +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=1 not %run %t memaligndel 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=DeallocationTypeMismatch=0 %run %t memaligndel 2>&1 + +// Tests that type mismatches between allocation and deallocation functions are +// caught when the related option is set. 
+ +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "mallocdel")) { + int *p = (int *)malloc(16); + if (!p) + return 1; + delete p; + } + if (!strcmp(argv[1], "newfree")) { + int *p = new int; + if (!p) + return 1; + free((void *)p); + } + if (!strcmp(argv[1], "memaligndel")) { + int *p = (int *)memalign(0x10, 0x10); + if (!p) + return 1; + delete p; + } + return 0; +} + +// CHECK: ERROR: allocation type mismatch on address Index: projects/compiler-rt/test/hardened_allocator/overflow.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/overflow.cc +++ projects/compiler-rt/test/hardened_allocator/overflow.cc @@ -0,0 +1,38 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t malloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=QuarantineSizeMb=1 not %run %t quarantine 2>&1 | FileCheck %s + +// Tests that header corruption of an allocated or quarantined chunk is caught. 
+ +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "malloc")) { + // Simulate a header corruption of an allocated chunk (1-bit) + void *p = malloc(1U << 4); + if (!p) + return 1; + ((char *)p)[-1] ^= 1; + free(p); + } + if (!strcmp(argv[1], "quarantine")) { + void *p = malloc(1U << 4); + if (!p) + return 1; + free(p); + // Simulate a header corruption of a quarantined chunk + ((char *)p)[-2] ^= 1; + // Trigger the quarantine recycle + for (int i = 0; i < 0x100; i++) { + p = malloc(1U << 16); + free(p); + } + } + return 0; +} + +// CHECK: ERROR: corrupted chunk header at address Index: projects/compiler-rt/test/hardened_allocator/quarantine.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/quarantine.cc +++ projects/compiler-rt/test/hardened_allocator/quarantine.cc @@ -0,0 +1,43 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=QuarantineSizeMb=1 %run %t 2>&1 + +// Tests that the quarantine prevents a chunk from being reused right away. +// Also tests that a chunk will eventually become available again for +// allocation when the recycling criteria has been met. 
+ +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p, *old_p; + size_t size = 1U << 16; + + // The delayed freelist will prevent a chunk from being available right away + p = malloc(size); + if (!p) + return 1; + old_p = p; + free(p); + p = malloc(size); + if (!p) + return 1; + if (old_p == p) + return 1; + free(p); + + // Eventually the chunk should become available again + bool found = false; + for (int i = 0; i < 0x100 && found == false; i++) { + p = malloc(size); + if (!p) + return 1; + found = (p == old_p); + free(p); + } + if (found == false) + return 1; + + return 0; +} Index: projects/compiler-rt/test/hardened_allocator/realloc.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/realloc.cc +++ projects/compiler-rt/test/hardened_allocator/realloc.cc @@ -0,0 +1,69 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t pointers 2>&1 +// RUN: %run %t contents 2>&1 +// RUN: not %run %t memalign 2>&1 | FileCheck %s + +// Tests that our reallocation function returns the same pointer when the +// requested size can fit into the previously allocated chunk. Also tests that +// a new chunk is returned if the size is greater, and that the contents of the +// chunk are left unchanged. +// As a final test, make sure that a chunk allocated by memalign cannot be +// reallocated. + +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p, *old_p; + size_t size = 32; + + assert(argc == 2); + if (!strcmp(argv[1], "pointers")) { + old_p = p = realloc(nullptr, size); + if (!p) + return 1; + size = malloc_usable_size(p); + // Our realloc implementation will return the same pointer if the size + // requested is lower or equal to the usable size of the associated chunk. + p = realloc(p, size - 1); + if (p != old_p) + return 1; + p = realloc(p, size); + if (p != old_p) + return 1; + // And a new one if the size is greater. 
+ p = realloc(p, size + 1); + if (p == old_p) + return 1; + // A size of 0 will free the chunk and return nullptr. + p = realloc(p, 0); + if (p) + return 1; + old_p = nullptr; + } + if (!strcmp(argv[1], "contents")) { + p = realloc(nullptr, size); + if (!p) + return 1; + for (int i = 0; i < size; i++) + reinterpret_cast<char *>(p)[i] = 'A'; + p = realloc(p, size + 1); + // The contents of the reallocated chunk must match the original one. + for (int i = 0; i < size; i++) + if (reinterpret_cast<char *>(p)[i] != 'A') + return 1; + } + if (!strcmp(argv[1], "memalign")) { + // A chunk coming from memalign cannot be reallocated. + p = memalign(16, size); + if (!p) + return 1; + p = realloc(p, size); + free(p); + } + return 0; +} + +// CHECK: ERROR: invalid chunk type when reallocating address Index: projects/compiler-rt/test/hardened_allocator/sized-delete.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/sized-delete.cc +++ projects/compiler-rt/test/hardened_allocator/sized-delete.cc @@ -0,0 +1,40 @@ +// RUN: %clang_scudo -fsized-deallocation %s -o %t +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 %run %t gooddel 2>&1 +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 not %run %t baddel 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=0 %run %t baddel 2>&1 +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 %run %t gooddelarr 2>&1 +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=1 not %run %t baddelarr 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=DeleteSizeMismatch=0 %run %t baddelarr 2>&1 + +// Ensures that the sized delete operator errors out when the appropriate +// option is passed and the sizes do not match between allocation and +// deallocation functions. 
+ +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "gooddel")) { + long long *p = new long long; + operator delete(p, sizeof(long long)); + } + if (!strcmp(argv[1], "baddel")) { + long long *p = new long long; + operator delete(p, 2); + } + if (!strcmp(argv[1], "gooddelarr")) { + char *p = new char[64]; + operator delete[](p, 64); + } + if (!strcmp(argv[1], "baddelarr")) { + char *p = new char[63]; + operator delete[](p, 64); + } + return 0; +} + +// CHECK: ERROR: invalid sized delete on chunk at address Index: projects/compiler-rt/test/hardened_allocator/sizes.cc =================================================================== --- projects/compiler-rt/test/hardened_allocator/sizes.cc +++ projects/compiler-rt/test/hardened_allocator/sizes.cc @@ -0,0 +1,61 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t malloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=allocator_may_return_null=1 %run %t malloc 2>&1 +// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t calloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=allocator_may_return_null=1 %run %t calloc 2>&1 +// RUN: %run %t usable 2>&1 + +// Tests for various edge cases related to sizes, notably the maximum size the +// allocator can allocate. Tests that an integer overflow in the parameters of +// calloc is caught. + +#include +#include +#include +#include + +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "malloc")) { + // Currently the maximum size the allocator can allocate is 1ULL<<40 bytes. + size_t size = std::numeric_limits::max(); + void *p = malloc(size); + if (p) + return 1; + size = (1ULL << 40) - 16; + p = malloc(size); + if (p) + return 1; + } + if (!strcmp(argv[1], "calloc")) { + // Trigger an overflow in calloc. 
+ size_t size = std::numeric_limits::max(); + void *p = calloc((size / 0x1000) + 1, 0x1000); + if (p) + return 1; + } + if (!strcmp(argv[1], "usable")) { + // Playing with the actual usable size of a chunk. + void *p = malloc(1007); + if (!p) + return 1; + size_t size = malloc_usable_size(p); + if (size < 1007) + return 1; + memset(p, 'A', size); + p = realloc(p, 2014); + if (!p) + return 1; + size = malloc_usable_size(p); + if (size < 2014) + return 1; + memset(p, 'B', size); + free(p); + } + return 0; +} + +// CHECK: allocator is terminating the process