Index: cmake/config-ix.cmake =================================================================== --- cmake/config-ix.cmake +++ cmake/config-ix.cmake @@ -270,6 +270,7 @@ set(ALL_SAFESTACK_SUPPORTED_ARCH ${X86} ${X86_64} ${ARM64} ${MIPS32} ${MIPS64}) set(ALL_CFI_SUPPORTED_ARCH ${X86} ${X86_64} ${MIPS64}) set(ALL_ESAN_SUPPORTED_ARCH ${X86_64}) +set(ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH ${X86_64}) if(APPLE) include(CompilerRTDarwinUtils) @@ -507,6 +508,9 @@ list_intersect(ESAN_SUPPORTED_ARCH ALL_ESAN_SUPPORTED_ARCH SANITIZER_COMMON_SUPPORTED_ARCH) + list_intersect(HARDENED_ALLOCATOR_SUPPORTED_ARCH + ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH + SANITIZER_COMMON_SUPPORTED_ARCH) else() # Architectures supported by compiler-rt libraries. filter_available_targets(BUILTIN_SUPPORTED_ARCH @@ -530,6 +534,8 @@ ${ALL_SAFESTACK_SUPPORTED_ARCH}) filter_available_targets(CFI_SUPPORTED_ARCH ${ALL_CFI_SUPPORTED_ARCH}) filter_available_targets(ESAN_SUPPORTED_ARCH ${ALL_ESAN_SUPPORTED_ARCH}) + filter_available_targets(HARDENED_ALLOCATOR_SUPPORTED_ARCH + ${ALL_HARDENED_ALLOCATOR_SUPPORTED_ARCH}) endif() if (MSVC) @@ -644,3 +650,11 @@ else() set(COMPILER_RT_HAS_ESAN FALSE) endif() + +if (COMPILER_RT_HAS_SANITIZER_COMMON AND HARDENED_ALLOCATOR_SUPPORTED_ARCH AND + OS_NAME MATCHES "Linux") + set(COMPILER_RT_HAS_HARDENED_ALLOCATOR TRUE) +else() + set(COMPILER_RT_HAS_HARDENED_ALLOCATOR FALSE) +endif() + Index: lib/CMakeLists.txt =================================================================== --- lib/CMakeLists.txt +++ lib/CMakeLists.txt @@ -52,4 +52,8 @@ if(COMPILER_RT_HAS_ESAN) add_subdirectory(esan) endif() + + if(COMPILER_RT_HAS_HARDENED_ALLOCATOR) + add_subdirectory(hardened_allocator) + endif() endif() Index: lib/hardened_allocator/CMakeLists.txt =================================================================== --- lib/hardened_allocator/CMakeLists.txt +++ lib/hardened_allocator/CMakeLists.txt @@ -0,0 +1,32 @@ +add_custom_target(hardened_allocator) + +include_directories(..) 
+
+set(HARDENED_ALLOCATOR_CFLAGS ${SANITIZER_COMMON_CFLAGS})
+append_rtti_flag(OFF HARDENED_ALLOCATOR_CFLAGS)
+list(APPEND HARDENED_ALLOCATOR_CFLAGS -msse4.2)
+
+set(HARDENED_ALLOCATOR_SOURCES
+  scudo_allocator.cc
+  scudo_flags.cc
+  scudo_malloc_linux.cc
+  scudo_new_delete.cc
+  scudo_rtl.cc
+  scudo_utils.cc)
+
+if(COMPILER_RT_HAS_HARDENED_ALLOCATOR)
+  foreach(arch ${HARDENED_ALLOCATOR_SUPPORTED_ARCH})
+    add_compiler_rt_runtime(clang_rt.hardened_allocator
+      STATIC
+      ARCHS ${arch}
+      SOURCES ${HARDENED_ALLOCATOR_SOURCES}
+              $<TARGET_OBJECTS:RTInterception.${arch}>
+              $<TARGET_OBJECTS:RTSanitizerCommon.${arch}>
+              $<TARGET_OBJECTS:RTSanitizerCommonLibc.${arch}>
+      CFLAGS ${HARDENED_ALLOCATOR_CFLAGS}
+      PARENT_TARGET hardened_allocator)
+  endforeach()
+endif()
+
+add_dependencies(compiler-rt hardened_allocator)
+
Index: lib/hardened_allocator/scudo_allocator.h
===================================================================
--- lib/hardened_allocator/scudo_allocator.h
+++ lib/hardened_allocator/scudo_allocator.h
@@ -0,0 +1,95 @@
+//===-- scudo_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Header for scudo_allocator.cc.
+///
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_ALLOCATOR_H_
+#define SCUDO_ALLOCATOR_H_
+
+#include "scudo_flags.h"
+
+#include "sanitizer_common/sanitizer_allocator.h"
+
+namespace __scudo {
+
+// We have to redefine CHECK_IMPL, as the __sanitizer one involves calling a
+// CheckFailedCallback function, which could be abused by a potential attacker.
+#ifdef CHECK_IMPL
+#undef CHECK_IMPL
+#endif
+
+#define CHECK_IMPL(c1, op, c2) \
+  do { \
+    __sanitizer::u64 v1 = (u64)(c1); \
+    __sanitizer::u64 v2 = (u64)(c2); \
+    if (UNLIKELY(!(v1 op v2))) \
+      __scudo::CheckFailed(__FILE__, __LINE__, \
+          "(" #c1 ") " #op " (" #c2 ")", v1, v2); \
+  } while (false) \
+/**/
+
+// We will also use our own CheckFailed and Die functions, once again to avoid
+// the __sanitizer ones that have callbacks.
+void NORETURN
+CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
+void NORETURN Die();
+
+extern bool scudo_inited;
+
+enum AllocType : u8 {
+  FROM_MALLOC = 0,    // Memory block came from malloc, realloc, calloc, etc.
+  FROM_NEW = 1,       // Memory block came from operator new.
+  FROM_NEWARRAY = 2,  // Memory block came from operator new [].
+  FROM_MEMALIGN = 3,  // Memory block came from memalign, posix_memalign, etc.
+};
+
+struct AllocatorOptions {
+  u32 quarantine_size_mb;
+  bool may_return_null;
+  bool alloc_dealloc_mismatch;
+  bool new_delete_size_mismatch;
+  bool zero_chunk_contents;
+
+  void SetFrom(const Flags *f, const CommonFlags *cf);
+  void CopyTo(Flags *f, CommonFlags *cf);
+};
+
+void InitializeAllocator(const AllocatorOptions &options);
+void DrainQuarantine();
+
+const uptr kAllocatorSpace = ~0ULL;
+const uptr kAllocatorSize = 0x10000000000ULL;
+static const uptr kMinAlignmentLog = 4;  // 16 bytes for x64
+static const uptr kMaxAlignmentLog = 24;
+
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0, SizeClassMap>
+    PrimaryAllocator;
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+    ScudoAllocator;
+
+void *scudo_malloc(uptr size, AllocType alloc_type);
+void scudo_free(void *ptr, AllocType alloc_type);
+void scudo_sized_free(void *ptr, uptr size, AllocType alloc_type);
+void *scudo_realloc(void *ptr, uptr size);
+void *scudo_calloc(uptr nmemb, uptr size);
+void *scudo_memalign(uptr alignment, uptr size);
+void *scudo_valloc(uptr size);
+void *scudo_pvalloc(uptr size);
+int scudo_posix_memalign(void **memptr, uptr alignment, uptr size);
+void *scudo_aligned_alloc(uptr alignment, uptr size);
+uptr scudo_malloc_usable_size(void *ptr);
+
+}  // namespace __scudo
+
+#endif  // SCUDO_ALLOCATOR_H_
Index: lib/hardened_allocator/scudo_allocator.cc
===================================================================
--- lib/hardened_allocator/scudo_allocator.cc
+++ lib/hardened_allocator/scudo_allocator.cc
@@ -0,0 +1,605 @@
+//===-- scudo_allocator.cc --------------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo Hardened Allocator implementation.
+/// It uses the sanitizer_common allocator as a base and aims at mitigating
+/// heap corruption vulnerabilities. It provides a checksum-guarded chunk
+/// header, a delayed free list, and various other security improvements.
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_allocator.h"
+#include "scudo_utils.h"
+
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_quarantine.h"
+
+#include <limits.h>
+#include <pthread.h>
+#include <smmintrin.h>
+
+#include <atomic>
+
+namespace __scudo {
+
+void NORETURN Die() {
+  // TODO(kostyak): do we want to be able to abort?
+  if (common_flags()->abort_on_error)
+    Abort();
+  internal__exit(common_flags()->exitcode);
+}
+
+void NORETURN CheckFailed(const char *file, int line, const char *cond,
+                          u64 v1, u64 v2) {
+  Printf("CHECK failed: %s:%d %s (%lld, %lld)\n", file, line, cond, v1, v2);
+  Die();
+}
+
+static ScudoAllocator &get_allocator();
+
+// TODO(kostyak): currently we have one prng per thread, is it necessary?
+static thread_local Xorshift128Plus prng;
+// Global cookie
+static u64 cookie;
+
+enum ChunkState : u8 {
+  CHUNK_AVAILABLE  = 0,
+  CHUNK_ALLOCATED  = 1,
+  CHUNK_QUARANTINE = 2
+};
+
+typedef unsigned __int128 u128;
+typedef u128 PackedHeader;
+
+// Our header requires 128-bit of storage on x64 (the only platform supported
+// as of now), which fits nicely with the alignment requirements. It's storing
+// a 16-bit checksum, the user requested size for that chunk (needed for
+// reallocation purposes), its state (available, allocated, or quarantined),
+// the allocation type (malloc, new, new[], or memalign), if that chunk if
+// 'offseted' (ie: if the chunk beginning is different than the backend
+// allocation beginning), the related offset field, and a salt.
+// Having the offset saves us from using functions such as GetBlockBegin, that
+// is fairly costly. Our first implementation used the MetaData as well, which
+// offers the advantage of being stored away from the chunk itself, but
+// accessing it was costly as well.
+// The header will be atomically loaded and stored using the 16-byte primitives
+// offered by the platform (likely requires cmpxchg16b support).
+struct UnpackedHeader {
+  // 1st 8 bytes
+  u128 checksum       : 16;
+  u128 requested_size : 40;
+  u128 state          : 2;
+  u128 alloc_type     : 2;
+  u128 with_offset    : 1;
+  u128 unused_0_      : 3;
+  // 2nd 8 bytes
+  u128 offset         : 24;
+  u128 unused_1_      : 24;
+  u128 salt           : 16;
+};
+
+COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(u128));
+COMPILER_CHECK(sizeof(PackedHeader) == sizeof(u128));
+
+static const uptr kChunkHeaderSize = sizeof(PackedHeader);
+
+struct ScudoChunk : UnpackedHeader {
+  uptr UserBeg() {
+    return reinterpret_cast<uptr>(this) + kChunkHeaderSize;
+  }
+  // We can't use the offset member of the chunk itself, as we would double
+  // fetch it without any warranty that it wouldn't have been tampered. To
+  // prevent this, we work with a stack based copy of the header, hence the
+  // following static function.
+  static void *AllocBeg(ScudoChunk *chunk, UnpackedHeader *header) {
+    if (header->with_offset == 0) {
+      return reinterpret_cast<void *>(chunk);
+    } else {
+      return reinterpret_cast<void *>(
+          chunk->UserBeg() - (header->offset << kMinAlignmentLog));
+    }
+  }
+
+  // CRC32 checksum of the Chunk pointer and its ChunkHeader.
+  // It currently uses the Intel Nehalem SSE4.2 crc32 64-bit instruction.
+  // TODO(kostyak): use a BSD checksum for the non-sse4.2 processors?
+  __attribute__((target("sse4.2")))
+  u16 Checksum(UnpackedHeader *header) {
+    u64 header_holder[2];
+    memcpy(header_holder, header, sizeof(header_holder));
+    u64 crc = _mm_crc32_u64(0, reinterpret_cast<uptr>(this));
+    // This is somewhat of a shortcut. The checksum is store in the 16 least
+    // significant bits of the header, hence zero-ing those bits out. It would
+    // be more valid to zero the checksum field of the UnpackedHeader, but
+    // would require holding an additional copy of it.
+    crc = _mm_crc32_u64(crc, header_holder[0] & 0xffffffffffff0000ULL);
+    crc = _mm_crc32_u64(crc, header_holder[1]);
+    return static_cast<u16>(crc ^ cookie);
+  }
+
+  // Loads and unpacks the header, verifying the checksum in the process.
+  void LoadHeader(UnpackedHeader *unpacked_header) {
+    PackedHeader packed_header;
+    __atomic_load(reinterpret_cast<PackedHeader *>(this), &packed_header,
+                  __ATOMIC_ACQUIRE);
+    *unpacked_header = bit_cast<UnpackedHeader>(packed_header);
+    if (unpacked_header->checksum != Checksum(unpacked_header)) {
+      Printf("ERROR: corrupted chunk header at address %p\n", this);
+      Die();
+    }
+  }
+
+  // Packs and stores the header, computing the checksum in the process. If a
+  // header is provided for comparison, we check that it is still the same.
+  // A different one would mean that another thread would have raced us.
+  void StoreHeader(UnpackedHeader *new_unpacked_header,
+                   UnpackedHeader *old_unpacked_header) {
+    new_unpacked_header->checksum = Checksum(new_unpacked_header);
+    PackedHeader new_packed_header =
+        bit_cast<PackedHeader>(*new_unpacked_header);
+    if (old_unpacked_header == nullptr) {
+      __atomic_store(reinterpret_cast<PackedHeader *>(this), &new_packed_header,
+                     __ATOMIC_RELEASE);
+    } else {
+      PackedHeader old_packed_header =
+          bit_cast<PackedHeader>(*old_unpacked_header);
+      if (!__atomic_compare_exchange(reinterpret_cast<PackedHeader *>(this),
+                                     &old_packed_header,
+                                     &new_packed_header,
+                                     false,
+                                     __ATOMIC_ACQUIRE,
+                                     __ATOMIC_ACQUIRE)) {
+        Printf("ERROR: race on chunk header at address %p\n", this);
+        Die();
+      }
+    }
+  }
+};
+
+static pthread_once_t global_inited = PTHREAD_ONCE_INIT;
+static thread_local bool thread_inited;
+static pthread_key_t pkey;
+static thread_local AllocatorCache cache;
+
+static void thread_dtor(void *p) {
+  uptr v = reinterpret_cast<uptr>(p);
+  if (v < PTHREAD_DESTRUCTOR_ITERATIONS) {
+    pthread_setspecific(pkey, reinterpret_cast<void *>(v + 1));
+    return;
+  }
+  DrainQuarantine();
+  get_allocator().DestroyCache(&cache);
+}
+
+static void global_init() {
+  pthread_key_create(&pkey, thread_dtor);
+}
+
+static void NOINLINE thread_init() {
+  pthread_once(&global_inited, global_init);
+  pthread_setspecific(pkey, reinterpret_cast<void *>(1));
+  get_allocator().InitCache(&cache);
+  thread_inited = true;
+}
+
+#if 0
+// As useful as this would be, __cxxabiv1::__cxa_thread_atexit uses new &
+// delete (or calloc and free depending on the implementation), making it
+// hard to use in the context of the allocator cache (delete is called post
+// cache destruction!). As this would allow us to get rid of the platform
+// specific pthread code, this might be worth digging into.
+struct ScudoAllocatorCache final : AllocatorCache { + ScudoAllocatorCache() { + get_allocator().InitCache(this); + } + ~ScudoAllocatorCache() { + DrainQuarantine(); + get_allocator().DestroyCache(this); + } +}; +static thread_local ScudoAllocatorCache cache; +#endif + +struct QuarantineCallback { + explicit QuarantineCallback(AllocatorCache *cache) + : cache_(cache) {} + + // Chunk recycling function, returns a quarantined chunk to the backend. + void Recycle(ScudoChunk *chunk) { + UnpackedHeader header; + chunk->LoadHeader(&header); + if (header.state != CHUNK_QUARANTINE) { + Printf("ERROR: invalid chunk state when recycling address %p\n", + chunk); + Die(); + } + void *ptr = ScudoChunk::AllocBeg(chunk, &header); + get_allocator().Deallocate(cache_, ptr); + } + + /// Internal quarantine allocation and deallocation functions. + void *Allocate(uptr size) { + // The internal quarantine memory cannot be protected by us. But the only + // structures allocated are QuarantineBatch, that are 8KB for x64. So we + // will use mmap for those, and given that Deallocate doesn't pass a size + // in, we enforce the size of the allocation to be sizeof(QuarantineBatch). 
+    // TODO(kostyak): switching to mmap impacts greatly performances, we have
+    // to find another solution
+    void *ptr = get_allocator().Allocate(cache_, size, 1, false);
+    return ptr;
+    // CHECK_EQ(size, sizeof(QuarantineBatch));
+    // return MmapOrDie(size, "QuarantineBatch");
+  }
+
+  void Deallocate(void *ptr) {
+    // UnmapOrDie(ptr, sizeof(QuarantineBatch));
+    get_allocator().Deallocate(cache_, ptr);
+  }
+
+  AllocatorCache *cache_;
+};
+
+typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
+typedef ScudoQuarantine::Cache QuarantineCache;
+// static thread_local QuarantineCache quarantine_cache;
+static THREADLOCAL uptr quarantine_cache[4] = {};
+COMPILER_CHECK(sizeof(QuarantineCache) <= sizeof(quarantine_cache));
+
+void AllocatorOptions::SetFrom(const Flags *f, const CommonFlags *cf) {
+  may_return_null = cf->allocator_may_return_null;
+  quarantine_size_mb = f->quarantine_size_mb;
+  alloc_dealloc_mismatch = f->alloc_dealloc_mismatch;
+  new_delete_size_mismatch = f->new_delete_size_mismatch;
+  zero_chunk_contents = f->zero_chunk_contents;
+}
+
+void AllocatorOptions::CopyTo(Flags *f, CommonFlags *cf) {
+  cf->allocator_may_return_null = may_return_null;
+  f->quarantine_size_mb = quarantine_size_mb;
+  f->alloc_dealloc_mismatch = alloc_dealloc_mismatch;
+  f->new_delete_size_mismatch = new_delete_size_mismatch;
+  f->zero_chunk_contents = zero_chunk_contents;
+}
+
+struct Allocator {
+  static const uptr kMaxAllowedMallocSize = 1ULL << 40;
+  static const uptr kMaxThreadLocalQuarantine = 1U << 20;
+  static const uptr kMinAlignment = 1 << kMinAlignmentLog;
+  static const uptr kMaxAlignment = 1 << kMaxAlignmentLog;  // 16 MB
+
+  ScudoAllocator allocator;
+  ScudoQuarantine quarantine;
+
+  bool alloc_dealloc_mismatch;
+  bool zero_chunk_contents;
+  bool new_delete_size_mismatch;
+
+  explicit Allocator(LinkerInitialized)
+    : quarantine(LINKER_INITIALIZED) {}
+
+  void Initialize(const AllocatorOptions &options) {
+    CHECK(TestCPUFeature(SSE4_2));  // for crc32
+    alloc_dealloc_mismatch = options.alloc_dealloc_mismatch;
+    new_delete_size_mismatch = options.new_delete_size_mismatch;
+    zero_chunk_contents = options.zero_chunk_contents;
+    allocator.Init(options.may_return_null);
+    quarantine.Init(static_cast<uptr>(options.quarantine_size_mb) << 20,
+                    kMaxThreadLocalQuarantine);
+    cookie = prng.Next();
+  }
+
+  // Allocates a chunk.
+  void *Allocate(uptr size, uptr alignment, AllocType alloc_type) {
+    if (UNLIKELY(!thread_inited))
+      thread_init();
+    if (!IsPowerOfTwo(alignment)) {
+      Printf("ERROR: alignment is not a power of 2\n");
+      Die();
+    }
+    if (alignment > kMaxAlignment)
+      return allocator.ReturnNullOrDie();
+    if (alignment < kMinAlignment)
+      alignment = kMinAlignment;
+    if (size == 0)
+      size = 1;
+    if (size >= kMaxAllowedMallocSize)
+      return allocator.ReturnNullOrDie();
+    uptr rounded_size = RoundUpTo(size, kMinAlignment);
+    uptr extra_bytes = kChunkHeaderSize;
+    if (alignment > kMinAlignment)
+      extra_bytes += alignment;
+    uptr needed_size = rounded_size + extra_bytes;
+    // CHECK_GE(needed_size, size);  // Overflow cannot happen
+    if (needed_size >= kMaxAllowedMallocSize)
+      return allocator.ReturnNullOrDie();
+    void *ptr = allocator.Allocate(&cache, needed_size, kMinAlignment);
+    if (ptr == nullptr)
+      return allocator.ReturnNullOrDie();
+
+    uptr alloc_beg = reinterpret_cast<uptr>(ptr);
+    uptr chunk_beg = alloc_beg + kChunkHeaderSize;
+    if (!IsAligned(chunk_beg, alignment))
+      chunk_beg = RoundUpTo(chunk_beg, alignment);
+    CHECK_LE(chunk_beg + size, alloc_beg + needed_size);
+    ScudoChunk *chunk =
+        reinterpret_cast<ScudoChunk *>(chunk_beg - kChunkHeaderSize);
+    UnpackedHeader header = {};
+    header.state = CHUNK_ALLOCATED;
+    if (chunk_beg != alloc_beg + kChunkHeaderSize) {
+      header.with_offset = 1;
+      header.offset = (chunk_beg - alloc_beg) >> kMinAlignmentLog;
+    }
+    header.alloc_type = alloc_type;
+    header.requested_size = size;
+    header.salt = static_cast<u16>(prng.Next());
+    chunk->StoreHeader(&header, nullptr);
+    void *user_ptr = reinterpret_cast<void *>(chunk_beg);
+    if (zero_chunk_contents && allocator.FromPrimary(ptr))
+      memset(user_ptr, 0, size);
+    // TODO(kostyak): hooks sound like a terrible idea security wise but might
+    // be needed for things to work properly?
+    // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(user_ptr, size);
+    return user_ptr;
+  }
+
+  // Deallocates a Chunk, which means adding it to the delayed free list (or
+  // Quarantine).
+  void Deallocate(void *user_ptr, uptr delete_size, AllocType alloc_type) {
+    if (UNLIKELY(!thread_inited))
+      thread_init();
+    // TODO(kostyak): see hook comment above
+    // if (&__sanitizer_free_hook) __sanitizer_free_hook(user_ptr);
+    if (user_ptr == nullptr)
+      return;
+    uptr chunk_beg = reinterpret_cast<uptr>(user_ptr);
+    if (!IsAligned(chunk_beg, kMinAlignment)) {
+      Printf("ERROR: attempted to deallocate a chunk not properly aligned at "
+             "address %p\n", user_ptr);
+      Die();
+    }
+    ScudoChunk *chunk =
+        reinterpret_cast<ScudoChunk *>(chunk_beg - kChunkHeaderSize);
+    UnpackedHeader old_header;
+    chunk->LoadHeader(&old_header);
+    if (old_header.state != CHUNK_ALLOCATED) {
+      Printf("ERROR: invalid chunk state when deallocating address %p\n",
+             chunk);
+      Die();
+    }
+    UnpackedHeader new_header = old_header;
+    new_header.state = CHUNK_QUARANTINE;
+    chunk->StoreHeader(&new_header, &old_header);
+    if (alloc_dealloc_mismatch) {
+      // The deallocation type has to match the allocation one
+      if (new_header.alloc_type != alloc_type) {
+        // With the exception of memalign'd Chunks, that can be still be free'd
+        if (!(new_header.alloc_type == FROM_MEMALIGN &&
+              alloc_type == FROM_MALLOC)) {
+          Printf("ERROR: allocation type mismatch on address %p\n", chunk);
+          Die();
+        }
+      }
+    }
+    uptr size = new_header.requested_size;
+    if (new_delete_size_mismatch) {
+      if (delete_size && delete_size != size) {
+        Printf("ERROR: invalid sized delete on chunk at address %p\n", chunk);
+        Die();
+      }
+    }
+    quarantine.Put(reinterpret_cast<QuarantineCache *>(quarantine_cache),
+                   QuarantineCallback(&cache), chunk, size);
+  }
+
+  // Returns the actual usable size of a chunk. Since this requires loading the
+  // header, we will return it in the second parameter, as it can be required
+  // by the caller to perform additional processing.
+  uptr UsableSize(const void *ptr, UnpackedHeader *header) {
+    if (UNLIKELY(!thread_inited))
+      thread_init();
+    if (ptr == nullptr)
+      return 0;
+    uptr chunk_beg = reinterpret_cast<uptr>(ptr);
+    ScudoChunk *chunk =
+        reinterpret_cast<ScudoChunk *>(chunk_beg - kChunkHeaderSize);
+    chunk->LoadHeader(header);
+    // Getting the usable size of a chunk only makes sense if it's allocated.
+    if (header->state != CHUNK_ALLOCATED) {
+      Printf("ERROR: attempted to size a non-allocated chunk at address %p\n",
+             chunk);
+    }
+    uptr size = allocator.GetActuallyAllocatedSize(
+        ScudoChunk::AllocBeg(chunk, header));
+    // UsableSize works as malloc_usable_size, which is also what (AFAIU)
+    // MallocExtension::GetAllocatedSize aims at providing. This means we will
+    // return the size of the chunk from the user beginning to the end of the
+    // 'user' allocation, hence us subtracting the header and|or offset from
+    // the size.
+    if (size == 0) {
+      return size;
+    }
+    if (header->with_offset == 0) {
+      return size - kChunkHeaderSize;
+    }
+    return size - (header->offset << kMinAlignmentLog);
+  }
+
+  // Helper function that doesn't care about the header.
+  uptr UsableSize(const void *ptr) {
+    UnpackedHeader header;
+    return UsableSize(ptr, &header);
+  }
+
+  // Reallocates a chunk. We can save on a new allocation if the new requested
+  // size still fits in the chunk.
+  void *Reallocate(void *old_ptr, uptr new_size) {
+    if (UNLIKELY(!thread_inited))
+      thread_init();
+    // CHECK(old_ptr && new_size);  // Redundant with scudo_realloc
+    UnpackedHeader old_header;
+    uptr usable_size = UsableSize(old_ptr, &old_header);
+    uptr chunk_beg = reinterpret_cast<uptr>(old_ptr);
+    ScudoChunk *chunk =
+        reinterpret_cast<ScudoChunk *>(chunk_beg - kChunkHeaderSize);
+    if (old_header.alloc_type != FROM_MALLOC) {
+      Printf("ERROR: invalid chunk type when reallocating address %p\n",
+             chunk);
+      Die();
+    }
+    UnpackedHeader new_header = old_header;
+    // The new size still fits in the current chunk.
+    if (new_size <= usable_size) {
+      // TODO(kostyak): zero the additional contents
+      new_header.requested_size = new_size;
+      chunk->StoreHeader(&new_header, &old_header);
+      return old_ptr;
+    }
+    // Otherwise, we have to allocate a new chunk and copy the contents of the
+    // old one.
+    void *new_ptr = Allocate(new_size, kMinAlignment, FROM_MALLOC);
+    if (new_ptr) {
+      uptr old_size = old_header.requested_size;
+      memcpy(new_ptr, old_ptr, Min(new_size, old_size));
+      new_header.state = CHUNK_QUARANTINE;
+      chunk->StoreHeader(&new_header, &old_header);
+      quarantine.Put(reinterpret_cast<QuarantineCache *>(quarantine_cache),
+                     QuarantineCallback(&cache), chunk, old_size);
+    }
+    return new_ptr;
+  }
+
+  void *Calloc(uptr nmemb, uptr size) {
+    uptr total = nmemb * size;
+    if (size != 0 && total / size != nmemb)  // Overflow check
+      return allocator.ReturnNullOrDie();
+    void *ptr = Allocate(total, kMinAlignment, FROM_MALLOC);
+    if (ptr && allocator.FromPrimary(ptr))
+      memset(ptr, 0, total);
+    return ptr;
+  }
+
+  void DrainQuarantine() {
+    quarantine.Drain(reinterpret_cast<QuarantineCache *>(quarantine_cache),
+                     QuarantineCallback(&cache));
+  }
+};
+
+static Allocator instance(LINKER_INITIALIZED);
+
+static ScudoAllocator &get_allocator() {
+  return instance.allocator;
+}
+
+void InitializeAllocator(const AllocatorOptions &options) {
+  instance.Initialize(options);
+}
+
+void DrainQuarantine() {
+  instance.DrainQuarantine();
+}
+
+void *scudo_malloc(uptr size, AllocType alloc_type) { + return instance.Allocate(size, Allocator::kMinAlignment, alloc_type); +} + +void scudo_free(void *ptr, AllocType alloc_type) { + instance.Deallocate(ptr, 0, alloc_type); +} + +void scudo_sized_free(void *ptr, uptr size, AllocType alloc_type) { + instance.Deallocate(ptr, size, alloc_type); +} + +void *scudo_realloc(void *ptr, uptr size) { + if (ptr == nullptr) + return instance.Allocate(size, Allocator::kMinAlignment, FROM_MALLOC); + if (size == 0) { + instance.Deallocate(ptr, 0, FROM_MALLOC); + return nullptr; + } + return instance.Reallocate(ptr, size); +} + +void *scudo_calloc(uptr nmemb, uptr size) { + return instance.Calloc(nmemb, size); +} + +void *scudo_valloc(uptr size) { + return instance.Allocate(size, GetPageSizeCached(), FROM_MEMALIGN); +} + +void *scudo_memalign(uptr alignment, uptr size) { + return instance.Allocate(size, alignment, FROM_MEMALIGN); +} + +void *scudo_pvalloc(uptr size) { + uptr PageSize = GetPageSizeCached(); + size = RoundUpTo(size, PageSize); + if (size == 0) { + // pvalloc(0) should allocate one page. + size = PageSize; + } + return instance.Allocate(size, PageSize, FROM_MEMALIGN); +} + +int scudo_posix_memalign(void **memptr, uptr alignment, uptr size) { + void *ptr = instance.Allocate(size, alignment, FROM_MEMALIGN); + *memptr = ptr; + return 0; +} + +void *scudo_aligned_alloc(uptr alignment, uptr size) { + // size must be a multiple of the alignment. To avoid a division, we first + // make sure that alignment is a power of 2. 
+ CHECK(IsPowerOfTwo(alignment)); + CHECK_EQ((size & (alignment - 1)), 0); + return instance.Allocate(size, alignment, FROM_MALLOC); +} + +uptr scudo_malloc_usable_size(void *ptr) { + return instance.UsableSize(ptr); +} + +} // namespace __scudo + +using namespace __scudo; + +// MallocExtension helper functions + +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + get_allocator().GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + get_allocator().GetStats(stats); + return stats[AllocatorStatMapped]; +} + +uptr __sanitizer_get_free_bytes() { + return 1; +} + +uptr __sanitizer_get_unmapped_bytes() { + return 1; +} + +uptr __sanitizer_get_estimated_allocated_size(uptr size) { + return size; +} + +int __sanitizer_get_ownership(const void *p) { + return instance.UsableSize(p) != 0; +} + +uptr __sanitizer_get_allocated_size(const void *p) { + return instance.UsableSize(p); +} Index: lib/hardened_allocator/scudo_flags.h =================================================================== --- lib/hardened_allocator/scudo_flags.h +++ lib/hardened_allocator/scudo_flags.h @@ -0,0 +1,36 @@ +//===-- scudo_flags.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_flags.cc. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_FLAGS_H_ +#define SCUDO_FLAGS_H_ + +namespace __scudo { + +struct Flags { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Type Name; +#include "scudo_flags.inc" +#undef SCUDO_FLAG + + void SetDefaults(); +}; + +extern Flags scudo_flags_dont_use_directly; +inline Flags *flags() { + return &scudo_flags_dont_use_directly; +} + +void InitializeFlags(); + +} // namespace __scudo + +#endif // SCUDO_FLAGS_H_ Index: lib/hardened_allocator/scudo_flags.cc =================================================================== --- lib/hardened_allocator/scudo_flags.cc +++ lib/hardened_allocator/scudo_flags.cc @@ -0,0 +1,61 @@ +//===-- scudo_flags.cc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Hardened Allocator flag parsing logic. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_flags.h" + +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_flag_parser.h" + +namespace __scudo { + +Flags scudo_flags_dont_use_directly; // use via flags(). 
+ +void Flags::SetDefaults() { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "scudo_flags.inc" +#undef SCUDO_FLAG +} + +static void RegisterScudoFlags(FlagParser *parser, Flags *f) { +#define SCUDO_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "scudo_flags.inc" +#undef SCUDO_FLAG +} + +void InitializeFlags() { + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.exitcode = 1; + OverrideCommonFlags(cf); + } + Flags *f = flags(); + f->SetDefaults(); + + FlagParser scudo_parser; + RegisterScudoFlags(&scudo_parser, f); + RegisterCommonFlags(&scudo_parser); + + scudo_parser.ParseString(GetEnv("SCUDO_OPTIONS")); + + InitializeCommonFlags(); + + if (f->quarantine_size_mb < 0) { + const int kDefaultQuarantineSizeMb = 1UL << 6; // 64 MB + f->quarantine_size_mb = kDefaultQuarantineSizeMb; + } +} + +} Index: lib/hardened_allocator/scudo_flags.inc =================================================================== --- lib/hardened_allocator/scudo_flags.inc +++ lib/hardened_allocator/scudo_flags.inc @@ -0,0 +1,30 @@ +//===-- scudo_flags.inc -----------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Hardened Allocator runtime flags. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_FLAG +# error "Define SCUDO_FLAG prior to including this file!" +#endif + +SCUDO_FLAG(int, quarantine_size_mb, -1, + "Size (in Mb) of quarantine used to detect use-after-free " + "errors. 
Lower value may reduce memory usage but increase the " + "chance of false negatives.") + +SCUDO_FLAG(bool, alloc_dealloc_mismatch, true, + "Report errors on malloc/delete, new/free, new/delete[], etc.") + +SCUDO_FLAG(bool, new_delete_size_mismatch, true, + "Report errors on mismatch between size of new and delete.") + +SCUDO_FLAG(bool, zero_chunk_contents, false, + "Zero chunk contents on allocation and deallocation.") Index: lib/hardened_allocator/scudo_malloc_linux.cc =================================================================== --- lib/hardened_allocator/scudo_malloc_linux.cc +++ lib/hardened_allocator/scudo_malloc_linux.cc @@ -0,0 +1,75 @@ +//===-- scudo_malloc_linux.cc -----------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Linux specific malloc interception functions. 
+/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_LINUX + +#include "scudo_allocator.h" + +#include "interception/interception.h" + +using namespace __scudo; + +INTERCEPTOR(void, free, void *ptr) { + scudo_free(ptr, FROM_MALLOC); +} + +INTERCEPTOR(void, cfree, void *ptr) { + scudo_free(ptr, FROM_MALLOC); +} + +INTERCEPTOR(void*, malloc, uptr size) { + return scudo_malloc(size, FROM_MALLOC); +} + +INTERCEPTOR(void*, realloc, void *ptr, uptr size) { + return scudo_realloc(ptr, size); +} + +INTERCEPTOR(void*, calloc, uptr nmemb, uptr size) { + return scudo_calloc(nmemb, size); +} + +INTERCEPTOR(void*, valloc, uptr size) { + return scudo_valloc(size); +} + +INTERCEPTOR(void*, memalign, uptr alignment, uptr size) { + return scudo_memalign(alignment, size); +} + +INTERCEPTOR(void*, __libc_memalign, uptr alignment, uptr size) { + return scudo_memalign(alignment, size); +} + +INTERCEPTOR(void*, pvalloc, uptr size) { + return scudo_pvalloc(size); +} + +INTERCEPTOR(void*, aligned_alloc, uptr alignment, uptr size) { + return scudo_aligned_alloc(alignment, size); +} + +INTERCEPTOR(int, posix_memalign, void **memptr, uptr alignment, uptr size) { + return scudo_posix_memalign(memptr, alignment, size); +} + +INTERCEPTOR(uptr, malloc_usable_size, void *ptr) { + return scudo_malloc_usable_size(ptr); +} + +INTERCEPTOR(int, mallopt, int cmd, int value) { + return -1; +} + +#endif Index: lib/hardened_allocator/scudo_new_delete.cc =================================================================== --- lib/hardened_allocator/scudo_new_delete.cc +++ lib/hardened_allocator/scudo_new_delete.cc @@ -0,0 +1,69 @@ +//===-- scudo_new_delete.cc -------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// +/// +/// Interceptors for operators new and delete. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" + +#include "interception/interception.h" + +#include + +using namespace __scudo; + +#define CXX_OPERATOR_ATTRIBUTE INTERCEPTOR_ATTRIBUTE + +// Fake std::nothrow_t to avoid including . +namespace std { +struct nothrow_t {}; +} // namespace std + +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size) { + return scudo_malloc(size, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size) { + return scudo_malloc(size, FROM_NEWARRAY); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) { + return scudo_malloc(size, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) { + return scudo_malloc(size, FROM_NEWARRAY); +} + +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT { + return scudo_free(ptr, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT { + return scudo_free(ptr, FROM_NEWARRAY); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) NOEXCEPT { + return scudo_free(ptr, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) NOEXCEPT { + return scudo_free(ptr, FROM_NEWARRAY); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete(void *ptr, size_t size) NOEXCEPT { + scudo_sized_free(ptr, size, FROM_NEW); +} +CXX_OPERATOR_ATTRIBUTE +void operator delete[](void *ptr, size_t size) NOEXCEPT { + scudo_sized_free(ptr, size, FROM_NEWARRAY); +} Index: lib/hardened_allocator/scudo_rtl.cc =================================================================== --- lib/hardened_allocator/scudo_rtl.cc +++ lib/hardened_allocator/scudo_rtl.cc @@ -0,0 +1,51 @@ +//===-- scudo_rtl.cc --------------------------------------------*- C++ 
-*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Main file for the Hardened Allocator runtime library. +/// +//===----------------------------------------------------------------------===// + +#include "scudo_allocator.h" + +namespace __scudo { + +bool scudo_inited; +bool scudo_init_is_running; + +static void ScudoInitInternal() { + if (LIKELY(scudo_inited)) + return; + SanitizerToolName = "Scudo"; + CHECK(!scudo_init_is_running && "Scudo init calls itself!"); + scudo_init_is_running = true; + + InitializeFlags(); + + AllocatorOptions allocator_options; + allocator_options.SetFrom(flags(), common_flags()); + InitializeAllocator(allocator_options); + + scudo_inited = true; + scudo_init_is_running = false; +} + +} // namespace __scudo + +using namespace __scudo; + +void __scudo_init() { + ScudoInitInternal(); +} + +#if SANITIZER_CAN_USE_PREINIT_ARRAY +__attribute__((section(".preinit_array"), used)) + void (*__local_scudo_preinit)(void) = __scudo_init; +#else +#error "Can't use .preinit_array" +#endif Index: lib/hardened_allocator/scudo_utils.h =================================================================== --- lib/hardened_allocator/scudo_utils.h +++ lib/hardened_allocator/scudo_utils.h @@ -0,0 +1,69 @@ +//===-- scudo_utils.h -------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Header for scudo_utils.cc. 
+/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_UTILS_H_ +#define SCUDO_UTILS_H_ + +#include + +#include "sanitizer_common/sanitizer_common.h" + +namespace __scudo { + +template +inline Dest bit_cast(const Source& source) { + typedef char VerifySizesAreEqual[sizeof(Dest) == sizeof(Source) ? 1 : -1] + UNUSED; + Dest dest; + memcpy(&dest, &source, sizeof(dest)); + return dest; +} + +enum CPUFeature { + SSE4_2 = 0, + RDRAND = 1, + ENUM_CPUFEATURE_MAX +}; +bool TestCPUFeature(CPUFeature feature); + +// Tiny PRNG based on https://en.wikipedia.org/wiki/Xorshift#xorshift.2B +// The state (128 bits) will be stored in thread local storage +struct Xorshift128Plus { + public: + Xorshift128Plus(); + Xorshift128Plus(u64 state_0, u64 state_1) + : state_0_(state_0), state_1_(state_1) {} + void SetSeed(u64 state_0, u64 state_1) { + state_0_ = state_0; + state_1_ = state_1; + } + void GetSeed(u64 *state_0, u64 *state_1) { + *state_0 = state_0_; + *state_1 = state_1_; + } + u64 Next() { + u64 x = state_0_; + const u64 y = state_1_; + state_0_ = y; + x ^= x << 23; // a + state_1_ = x ^ y ^ (x >> 17) ^ (y >> 26); // b, c + return state_1_ + y; + } + private: + u64 state_0_; + u64 state_1_; +}; + +} // namespace __scudo + +#endif // SCUDO_UTILS_H_ Index: lib/hardened_allocator/scudo_utils.cc =================================================================== --- lib/hardened_allocator/scudo_utils.cc +++ lib/hardened_allocator/scudo_utils.cc @@ -0,0 +1,120 @@ +//===-- scudo_utils.cc ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Platform specific utility functions. 
+///
+//===----------------------------------------------------------------------===//
+
+#include "scudo_utils.h"
+
+#include <string.h>
+#include <chrono>      // for std::chrono::high_resolution_clock
+#include <functional>  // for std::hash
+#include <thread>      // for std::this_thread
+
+namespace __scudo {
+
+typedef struct {
+  u32 eax;
+  u32 ebx;
+  u32 ecx;
+  u32 edx;
+} CPUIDInfo;
+
+static void cpuid(CPUIDInfo *info, u32 leaf, u32 subleaf)
+{
+  asm volatile("cpuid"
+      : "=a" (info->eax), "=b" (info->ebx), "=c" (info->ecx), "=d" (info->edx)
+      : "a" (leaf), "c" (subleaf)
+  );
+}
+
+// Returns true if the CPU is a "GenuineIntel"
+static bool IsIntelCPU()
+{
+  CPUIDInfo info;
+
+  cpuid(&info, 0, 0);
+  if (memcmp(reinterpret_cast<const char *>(&info.ebx), "Genu", 4) ||
+      memcmp(reinterpret_cast<const char *>(&info.edx), "ineI", 4) ||
+      memcmp(reinterpret_cast<const char *>(&info.ecx), "ntel", 4)) {
+    return false;
+  }
+  return true;
+}
+
+bool TestCPUFeature(CPUFeature feature)
+{
+  static bool kInfoInitialized = false;
+  static CPUIDInfo kCPUInfo = {};
+
+  if (kInfoInitialized == false) {
+    if (IsIntelCPU() == true)
+      cpuid(&kCPUInfo, 1, 0);
+    kInfoInitialized = true;
+  }
+  switch (feature) {
+    case SSE4_2:
+      return ((kCPUInfo.ecx >> 20) & 0x1) != 0;
+    case RDRAND:
+      return ((kCPUInfo.ecx >> 30) & 0x1) != 0;
+    default:
+      break;
+  }
+  return false;
+}
+
+static u64 RdTSC() {
+  // Clang: __builtin_readcyclecounter
+  u64 low, high;
+  __asm__ volatile("rdtsc" : "=a" (low), "=d" (high));
+  return (high << 32) | low;
+}
+
+// RdRand64 will call rdrand if the feature is available for the CPU, otherwise
+// it will use a XOR of the cycle counter, the high resolution clock and the
+// thread ID hash.
+static u64 RdRand64() {
+  static s8 kHasRdRand = -1;
+  if (kHasRdRand == -1) {
+    kHasRdRand = TestCPUFeature(RDRAND);
+  }
+  if (kHasRdRand == 1) {
+    register u64 rnd;
+    u8 carry;
+
+    // Normally we need only one execution
+    asm volatile("rdrand %0; setc %1": "=r" (rnd), "=qm" (carry));
+    if (carry != 0)
+      return rnd;  // Success
+
+    // If the first attempt failed, we fall back to retries.
+    for (s32 c = 10; c != 0; --c) {
+      asm volatile("rdrand %0; setc %1": "=r" (rnd), "=qm" (carry));
+      if (carry != 0)
+        return rnd;  // Success
+    }
+
+    // All attempts failed. Log CPU error and abort.
+    Printf("ERROR: CPU error detected during 64-bit RDRAND execution.\n");
+    Die();
+  } else {
+    std::hash<std::thread::id> hasher;
+    return RdTSC() ^ hasher(std::this_thread::get_id()) ^
+        std::chrono::high_resolution_clock::now().time_since_epoch().count();
+  }
+}
+
+// Default constructor for Xorshift128Plus seeds the state with RdRand64
+Xorshift128Plus::Xorshift128Plus() {
+  state_0_ = RdRand64();
+  state_1_ = RdRand64();
+}
+
+} // namespace __scudo
Index: test/CMakeLists.txt
===================================================================
--- test/CMakeLists.txt
+++ test/CMakeLists.txt
@@ -73,6 +73,9 @@
   if(COMPILER_RT_HAS_ESAN)
     add_subdirectory(esan)
   endif()
+  if(COMPILER_RT_HAS_HARDENED_ALLOCATOR)
+    add_subdirectory(hardened_allocator)
+  endif()
 endif()
 
 if(COMPILER_RT_STANDALONE_BUILD)
Index: test/hardened_allocator/CMakeLists.txt
===================================================================
--- test/hardened_allocator/CMakeLists.txt
+++ test/hardened_allocator/CMakeLists.txt
@@ -0,0 +1,21 @@
+set(HARDENED_ALLOCATOR_LIT_SOURCE_DIR ${CMAKE_CURRENT_SOURCE_DIR})
+set(HARDENED_ALLOCATOR_LIT_BINARY_DIR ${CMAKE_CURRENT_BINARY_DIR})
+
+
+set(HARDENED_ALLOCATOR_TEST_DEPS ${SANITIZER_COMMON_LIT_TEST_DEPS})
+if(NOT COMPILER_RT_STANDALONE_BUILD)
+  list(APPEND HARDENED_ALLOCATOR_TEST_DEPS hardened_allocator)
+endif()
+
+configure_lit_site_cfg(
+  ${CMAKE_CURRENT_SOURCE_DIR}/lit.site.cfg.in
+ 
${CMAKE_CURRENT_BINARY_DIR}/lit.site.cfg + ) + +add_lit_testsuite(check-hardened_allocator + "Running the Hardened Allocator tests" + ${CMAKE_CURRENT_BINARY_DIR} + DEPENDS ${HARDENED_ALLOCATOR_TEST_DEPS}) +set_target_properties(check-hardened_allocator PROPERTIES FOLDER + "Hardened Allocator tests") + Index: test/hardened_allocator/alignment.cc =================================================================== --- test/hardened_allocator/alignment.cc +++ test/hardened_allocator/alignment.cc @@ -0,0 +1,25 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t pointers 2>&1 | FileCheck %s + +#include +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p, *old_p; + size_t alignment = 1U << 16, size = 1U << 8; + + assert(argc == 2); + if (!strcmp(argv[1], "pointers")) { + p = malloc(size); + if (p == NULL) + return 1; + free(reinterpret_cast(reinterpret_cast(p) | 8)); + } + return 0; +} + +// CHECK: ERROR: attempted to deallocate a chunk not properly aligned Index: test/hardened_allocator/double-free.cc =================================================================== --- test/hardened_allocator/double-free.cc +++ test/hardened_allocator/double-free.cc @@ -0,0 +1,32 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t malloc 2>&1 | FileCheck %s +// RUN: not %run %t new 2>&1 | FileCheck %s +// RUN: not %run %t newarray 2>&1 | FileCheck %s + +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "malloc")) { + void *p = malloc(sizeof(int)); + free(p); + free(p); + } + if (!strcmp(argv[1], "new")) { + int *p = new int; + delete p; + delete p; + } + if (!strcmp(argv[1], "newarray")) { + int *p = new int[8]; + delete[] p; + delete[] p; + } + return 0; +} + +// CHECK: ERROR: invalid chunk state when deallocating address + Index: test/hardened_allocator/init.c =================================================================== --- test/hardened_allocator/init.c +++ 
test/hardened_allocator/init.c @@ -0,0 +1,7 @@ +// RUN: %clang_scudo %s -o %t && %run %t + +int main(int argc, char **argv) +{ + return 0; +} + Index: test/hardened_allocator/lit.cfg =================================================================== --- test/hardened_allocator/lit.cfg +++ test/hardened_allocator/lit.cfg @@ -0,0 +1,38 @@ +# -*- Python -*- + +import os + +# Setup config name. +config.name = 'Hardened Allocator' + +# Setup source root. +config.test_source_root = os.path.dirname(__file__) + +# Path to the static library +base_lib = os.path.join(config.compiler_rt_libdir, + "libclang_rt.hardened_allocator-%s.a" % config.target_arch) +whole_archive = "-Wl,-whole-archive %s -Wl,-no-whole-archive " % base_lib + +# Test suffixes. +config.suffixes = ['.c', '.cc', '.cpp', '.m', '.mm', '.ll', '.test'] + +# C flags. +c_flags = ["-lstdc++", + "-ldl", + "-lrt", + "-pthread", + "-latomic", #for __atomic_load_16, __atomic_store_16, __atomic_compare_exchange_16 + "-fPIE", + "-pie", + "-O0"] + +def build_invocation(compile_flags): + return " " + " ".join([config.clang] + compile_flags) + " " + +# Add clang substitutions. +config.substitutions.append( ("%clang_scudo ", + build_invocation(c_flags) + whole_archive ) ) + +# Hardened Allocator tests are currently supported on Linux only. +if config.host_os not in ['Linux']: + config.unsupported = True Index: test/hardened_allocator/lit.site.cfg.in =================================================================== --- test/hardened_allocator/lit.site.cfg.in +++ test/hardened_allocator/lit.site.cfg.in @@ -0,0 +1,7 @@ +@LIT_SITE_CFG_IN_HEADER@ + +# Load common config for all compiler-rt lit tests. +lit_config.load_config(config, "@COMPILER_RT_BINARY_DIR@/test/lit.common.configured") + +# Load tool-specific config that would do the real work. 
+lit_config.load_config(config, "@HARDENED_ALLOCATOR_LIT_SOURCE_DIR@/lit.cfg") Index: test/hardened_allocator/malloc.cc =================================================================== --- test/hardened_allocator/malloc.cc +++ test/hardened_allocator/malloc.cc @@ -0,0 +1,25 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t 2>&1 + +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p; + size_t size = 1U << 8; + + p = malloc(0); + if (p == NULL) + return 1; + free(p); + p = malloc(size); + if (p == NULL) + return 1; + memset(p, 'A', size); + free(p); + + return 0; +} + Index: test/hardened_allocator/memalign.cc =================================================================== --- test/hardened_allocator/memalign.cc +++ test/hardened_allocator/memalign.cc @@ -0,0 +1,40 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t valid 2>&1 +// RUN: not %run %t invalid 2>&1 | FileCheck %s + +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p; + size_t alignment = 1U << 12; + size_t size = alignment; + + assert(argc == 2); + if (!strcmp(argv[1], "valid")) { + p = memalign(alignment, size); + if (p == NULL) + return 1; + free(p); + p = NULL; + posix_memalign(&p, alignment, size); + if (p == NULL) + return 1; + free(p); + p = aligned_alloc(alignment, size); + if (p == NULL) + return 1; + free(p); + } + if (!strcmp(argv[1], "invalid")) { + p = memalign(alignment - 1, size); + free(p); + } + return 0; +} + +// CHECK: ERROR: alignment is not a power of 2 + Index: test/hardened_allocator/mismatch.cc =================================================================== --- test/hardened_allocator/mismatch.cc +++ test/hardened_allocator/mismatch.cc @@ -0,0 +1,26 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=alloc_dealloc_mismatch=1 not %run %t mallocdel 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=alloc_dealloc_mismatch=0 %run %t mallocdel 2>&1 +// RUN: SCUDO_OPTIONS=alloc_dealloc_mismatch=1 not %run %t newfree 2>&1 
| FileCheck %s +// RUN: SCUDO_OPTIONS=alloc_dealloc_mismatch=0 %run %t newfree 2>&1 + +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "mallocdel")) { + int *p = (int *)malloc(16); + delete p; + } + if (!strcmp(argv[1], "newfree")) { + int *p = new int; + free((void *)p); + } + return 0; +} + +// CHECK: ERROR: allocation type mismatch on address + Index: test/hardened_allocator/overflow.cc =================================================================== --- test/hardened_allocator/overflow.cc +++ test/hardened_allocator/overflow.cc @@ -0,0 +1,33 @@ +// RUN: %clang_scudo %s -o %t +// RUN: not %run %t malloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=quarantine_size_mb=1 not %run %t quarantine 2>&1 | FileCheck %s + +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "malloc")) { + // Simulate a header corruption of an allocated chunk (1-bit) + void *p = malloc(1U << 4); + ((char *)p)[-1] ^= 1; + free(p); + } + if (!strcmp(argv[1], "quarantine")) { + void *p = malloc(1U << 4); + free(p); + // Simulate a header corruption of a quarantined chunk + ((char *)p)[-2] ^= 1; + // Trigger the quarantine recycle + for (int i = 0; i < 0x100; i++) { + p = malloc(1U << 16); + free(p); + } + } + return 0; +} + +// CHECK: ERROR: corrupted chunk header at address + Index: test/hardened_allocator/quarantine.cc =================================================================== --- test/hardened_allocator/quarantine.cc +++ test/hardened_allocator/quarantine.cc @@ -0,0 +1,40 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=quarantine_size_mb=1 %run %t 2>&1 + +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p, *old_p; + size_t size = 1U << 16; + + // The delayed freelist will prevent a chunk from being available right away + p = malloc(size); + if (p == NULL) + return 1; + old_p = p; + free(p); + p = malloc(size); + if (p == 
NULL) + return 1; + if (old_p == p) + return 1; + free(p); + + // Eventually the chunk should become available again + bool found = false; + for (int i = 0; i < 0x100 && found == false; i++) { + p = malloc(size); + if (p == NULL) + return 1; + found = (p == old_p); + free(p); + } + if (found == false) + return 1; + + return 0; +} + Index: test/hardened_allocator/realloc.cc =================================================================== --- test/hardened_allocator/realloc.cc +++ test/hardened_allocator/realloc.cc @@ -0,0 +1,63 @@ +// RUN: %clang_scudo %s -o %t +// RUN: %run %t pointers 2>&1 +// RUN: %run %t contents 2>&1 +// RUN: not %run %t memalign 2>&1 | FileCheck %s + +#include +#include +#include + +int main(int argc, char **argv) +{ + void *p, *old_p; + size_t size = 32; + + assert(argc == 2); + if (!strcmp(argv[1], "pointers")) { + old_p = p = realloc(NULL, size); + if (p == NULL) + return 1; + size = malloc_usable_size(p); + // Our realloc implementation will return the same pointer if the size + // requested is lower or equal to the usable size of the associated chunk + p = realloc(p, size - 1); + if (p != old_p) + return 1; + p = realloc(p, size); + if (p != old_p) + return 1; + // And a new one if the size is greater + p = realloc(p, size + 1); + if (p == old_p) + return 1; + // A size of 0 will free the chunk and return NULL + p = realloc(p, 0); + if (p != NULL) + return 1; + old_p = NULL; + } + if (!strcmp(argv[1], "contents")) { + p = realloc(NULL, size); + if (p == NULL) + return 1; + for (int i = 0; i < size; i++) + reinterpret_cast(p)[i] = 'A'; + p = realloc(p, size + 1); + // The contents of the reallocated chunk must match the original one + for (int i = 0; i < size; i++) + if (reinterpret_cast(p)[i] != 'A') + return 1; + } + if (!strcmp(argv[1], "memalign")) { + // A chunk coming from memalign cannot be reallocated + p = memalign(16, size); + if (p == NULL) + return 1; + p = realloc(p, size); + free(p); + } + return 0; +} + +// CHECK: ERROR: 
invalid chunk type when reallocating address + Index: test/hardened_allocator/sized-delete.cc =================================================================== --- test/hardened_allocator/sized-delete.cc +++ test/hardened_allocator/sized-delete.cc @@ -0,0 +1,36 @@ +// RUN: %clang_scudo -fsized-deallocation %s -o %t +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=1 %run %t gooddel 2>&1 +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=1 not %run %t baddel 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=0 %run %t baddel 2>&1 +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=1 %run %t gooddelarr 2>&1 +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=1 not %run %t baddelarr 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=new_delete_size_mismatch=0 %run %t baddelarr 2>&1 + +#include +#include +#include +#include + +int main(int argc, char **argv) +{ + assert(argc == 2); + if (!strcmp(argv[1], "gooddel")) { + long long *p = new long long; + operator delete(p, sizeof(long long)); + } + if (!strcmp(argv[1], "baddel")) { + long long *p = new long long; + operator delete(p, 2); + } + if (!strcmp(argv[1], "gooddelarr")) { + char *p = new char[64]; + operator delete[](p, 64); + } + if (!strcmp(argv[1], "baddelarr")) { + char *p = new char[63]; + operator delete[](p, 64); + } + return 0; +} + +// CHECK: ERROR: invalid sized delete on chunk at address Index: test/hardened_allocator/sizes.cc =================================================================== --- test/hardened_allocator/sizes.cc +++ test/hardened_allocator/sizes.cc @@ -0,0 +1,57 @@ +// RUN: %clang_scudo %s -o %t +// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t malloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=allocator_may_return_null=1 %run %t malloc 2>&1 +// RUN: SCUDO_OPTIONS=allocator_may_return_null=0 not %run %t calloc 2>&1 | FileCheck %s +// RUN: SCUDO_OPTIONS=allocator_may_return_null=1 %run %t calloc 2>&1 +// RUN: %run %t usable 2>&1 + +#include +#include +#include +#include + 
+#include <limits>
+
+int main(int argc, char **argv)
+{
+  void *p;
+  size_t size;
+
+  assert(argc == 2);
+  if (!strcmp(argv[1], "malloc")) {
+    // Currently the maximum size the allocator can fulfill is 1ULL<<40 bytes
+    size = std::numeric_limits<size_t>::max();
+    p = malloc(size);
+    if (p != NULL)
+      return 1;
+    size = (1ULL << 40) - 16;
+    p = malloc(size);
+    if (p != NULL)
+      return 1;
+  }
+  if (!strcmp(argv[1], "calloc")) {
+    // Trigger an overflow in calloc
+    size = std::numeric_limits<size_t>::max();
+    p = calloc((size / 0x1000) + 1, 0x1000);
+    if (p != NULL)
+      return 1;
+  }
+  if (!strcmp(argv[1], "usable")) {
+    // Playing with the actual usable size of a chunk
+    p = malloc(1007);
+    size = malloc_usable_size(p);
+    if (size < 1007)
+      return 1;
+    memset(p, 'A', size);
+    p = realloc(p, 2014);
+    size = malloc_usable_size(p);
+    if (size < 2014)
+      return 1;
+    memset(p, 'B', size);
+    free(p);
+  }
+  return 0;
+}
+
+// CHECK: allocator is terminating the process