diff --git a/compiler-rt/lib/dfsan/CMakeLists.txt b/compiler-rt/lib/dfsan/CMakeLists.txt
--- a/compiler-rt/lib/dfsan/CMakeLists.txt
+++ b/compiler-rt/lib/dfsan/CMakeLists.txt
@@ -3,6 +3,7 @@
 # Runtime library sources and build flags.
 set(DFSAN_RTL_SOURCES
   dfsan.cpp
+  dfsan_allocator.cpp
   dfsan_chained_origin_depot.cpp
   dfsan_custom.cpp
   dfsan_interceptors.cpp
@@ -11,6 +12,7 @@
 set(DFSAN_RTL_HEADERS
   dfsan.h
+  dfsan_allocator.h
   dfsan_chained_origin_depot.h
   dfsan_flags.inc
   dfsan_flags.h
diff --git a/compiler-rt/lib/dfsan/dfsan.h b/compiler-rt/lib/dfsan/dfsan.h
--- a/compiler-rt/lib/dfsan/dfsan.h
+++ b/compiler-rt/lib/dfsan/dfsan.h
@@ -19,6 +19,10 @@
 #include "dfsan_flags.h"
 #include "dfsan_platform.h"
 
+#ifndef DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE
+#define DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1
+#endif
+
 using __sanitizer::u16;
 using __sanitizer::u32;
 using __sanitizer::uptr;
@@ -63,7 +67,10 @@
 namespace __dfsan {
 
-void InitializeInterceptors();
+extern int dfsan_inited;
+extern bool dfsan_init_is_running;
+
+void initialize_interceptors();
 
 inline dfsan_label *shadow_for(void *ptr) {
   return (dfsan_label *) ((((uptr) ptr) & ShadowMask()) << 1);
@@ -96,6 +103,24 @@
   return is_shadow_addr_valid((uptr)ptr_s);
 }
 
+void dfsan_copy_memory(void *dst, const void *src, uptr size);
+void dfsan_release_meta_memory(const void *addr, uptr size);
+
+void dfsan_allocator_init();
+void dfsan_deallocate(void *ptr);
+
+void *dfsan_malloc(uptr size);
+void *dfsan_calloc(uptr nmemb, uptr size);
+void *dfsan_realloc(void *ptr, uptr size);
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size);
+void *dfsan_valloc(uptr size);
+void *dfsan_pvalloc(uptr size);
+void *dfsan_aligned_alloc(uptr alignment, uptr size);
+void *dfsan_memalign(uptr alignment, uptr size);
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size);
+
+void dfsan_init();
+
 } // namespace __dfsan
 
 #endif // DFSAN_H
diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp
--- a/compiler-rt/lib/dfsan/dfsan.cpp
+++ b/compiler-rt/lib/dfsan/dfsan.cpp
@@ -21,7 +21,6 @@
 #include "dfsan/dfsan.h"
 #include "dfsan/dfsan_chained_origin_depot.h"
-#include "dfsan/dfsan_flags.h"
 #include "dfsan/dfsan_origin.h"
 #include "dfsan/dfsan_thread.h"
 #include "sanitizer_common/sanitizer_atomic.h"
@@ -540,10 +539,18 @@
   *(u32 *)(end - kOriginAlign) = origin;
 }
 
-static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr,
-                                   uptr size) {
-  dfsan_label *labelp = (dfsan_label *)shadow_addr;
-  for (; size != 0; --size, ++labelp) {
+static void WriteShadowInRange(dfsan_label label, uptr beg_shadow_addr,
+                               uptr end_shadow_addr) {
+  // TODO: After changing dfsan_label to 8bit, use internal_memset when label
+  // is not 0.
+  if (label) {
+    dfsan_label *labelp = (dfsan_label *)beg_shadow_addr;
+    for (; (uptr)labelp < end_shadow_addr; ++labelp) *labelp = label;
+    return;
+  }
+
+  dfsan_label *labelp = (dfsan_label *)beg_shadow_addr;
+  for (; (uptr)labelp < end_shadow_addr; ++labelp) {
     // Don't write the label if it is already the value we need it to be.
     // In a program where most addresses are not labeled, it is common that
     // a page of shadow memory is entirely zeroed. The Linux copy-on-write
     // implementation will, in this case, have all pages pointing to a
     // shared zeroed page. If we write labels to this page, it will become
     // unshared, and memory usage will increase, even though
     // the value written does not change the value in memory. Avoiding the
     // write when both |label| and |*labelp| are zero dramatically reduces
     // the amount of real memory used by large programs.
-    if (label == *labelp)
+    if (!*labelp)
       continue;
-    *labelp = label;
+    *labelp = 0;
   }
 }
 
+static void WriteShadowWithSize(dfsan_label label, uptr shadow_addr,
+                                uptr size) {
+  WriteShadowInRange(label, shadow_addr, shadow_addr + size * sizeof(label));
+}
+
 #define RET_CHAIN_ORIGIN(id) \
   GET_CALLER_PC_BP_SP;       \
   (void)sp;                  \
@@ -597,6 +609,37 @@
   __dfsan_mem_origin_transfer(dst, src, len);
 }
 
+namespace __dfsan {
+
+int dfsan_inited = 0;
+bool dfsan_init_is_running = false;
+
+void dfsan_copy_memory(void *dst, const void *src, uptr size) {
+  internal_memcpy(dst, src, size);
+  internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src),
+                  size * sizeof(dfsan_label));
+  if (__dfsan_get_track_origins())
+    dfsan_mem_origin_transfer(dst, src, size);
+}
+
+void dfsan_release_meta_memory(const void *addr, uptr size) {
+  dfsan_set_label(0, const_cast<void *>(addr), size);
+
+  // We are about to unmap a chunk of user memory.
+  // Mark the corresponding shadow memory as not needed.
+  const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
+  const void *end_addr = (void *)((uptr)addr + size);
+  const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
+  ReleaseMemoryPagesToOS(beg_shadow_addr, end_shadow_addr);
+  if (__dfsan_get_track_origins()) {
+    const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
+    const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);
+    ReleaseMemoryPagesToOS(beg_origin_addr, end_origin_addr);
+  }
+}
+
+} // namespace __dfsan
+
 // If the label s is tainted, set the size bytes from the address p to be a new
 // origin chain with the previous ID o and the current stack trace. This is
 // used by instrumentation to reduce code size when too much code is inserted.
@@ -610,63 +653,62 @@
   }
 }
 
-// Releases the pages within the origin address range, and sets the origin
-// addresses not on the pages to be 0.
-static void ReleaseOrClearOrigins(void *addr, uptr size) {
+// Releases the pages within the origin address range.
+static void ReleaseOrigins(void *addr, uptr size) {
   const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr);
   const void *end_addr = (void *)((uptr)addr + size);
   const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr);
+
+  if (end_origin_addr - beg_origin_addr <
+      common_flags()->clear_shadow_mmap_threshold)
+    return;
+
   const uptr page_size = GetPageSizeCached();
   const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size);
   const uptr end_aligned = RoundDownTo(end_origin_addr, page_size);
 
-  // dfsan_set_label can be called from the following cases
-  // 1) mapped ranges by new/delete and malloc/free. This case has origin memory
-  // size > 50k, and happens less frequently.
-  // 2) zero-filling internal data structures by utility libraries. This case
-  // has origin memory size < 16k, and happens more often.
-  // Set kNumPagesThreshold to be 4 to avoid releasing small pages.
-  const int kNumPagesThreshold = 4;
-  if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
-    return;
-
-  ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
+  if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+    Die();
 }
 
 void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) {
   const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr);
 
   if (0 != label) {
-    WriteShadowIfDifferent(label, beg_shadow_addr, size);
+    WriteShadowWithSize(label, beg_shadow_addr, size);
     if (__dfsan_get_track_origins())
       SetOrigin(addr, size, origin);
     return;
   }
 
   if (__dfsan_get_track_origins())
-    ReleaseOrClearOrigins(addr, size);
+    ReleaseOrigins(addr, size);
 
   // If label is 0, releases the pages within the shadow address range, and sets
   // the shadow addresses not on the pages to be 0.
   const void *end_addr = (void *)((uptr)addr + size);
   const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr);
+
+  if (end_shadow_addr - beg_shadow_addr <
+      common_flags()->clear_shadow_mmap_threshold)
+    return WriteShadowWithSize(label, beg_shadow_addr, size);
+
   const uptr page_size = GetPageSizeCached();
   const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size);
   const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size);
 
-  // dfsan_set_label can be called from the following cases
-  // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory
-  // size > 100k, and happens less frequently.
-  // 2) zero-filling internal data structures by utility libraries. This case
-  // has shadow memory size < 32k, and happens more often.
-  // Set kNumPagesThreshold to be 8 to avoid releasing small pages.
-  const int kNumPagesThreshold = 8;
-  if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned)
-    return WriteShadowIfDifferent(label, beg_shadow_addr, size);
-
-  WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr);
+  if (beg_aligned >= end_aligned) {
+    WriteShadowWithSize(0, beg_shadow_addr, size);
+  } else {
+    if (beg_aligned != beg_shadow_addr)
+      WriteShadowInRange(0, beg_shadow_addr, beg_aligned);
+    if (end_aligned != end_shadow_addr)
+      WriteShadowInRange(0, end_aligned, end_shadow_addr);
+    if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned))
+      Die();
+  }
-  ReleaseMemoryPagesToOS(beg_aligned, end_aligned);
-  WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned);
 }
 
 extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label(
@@ -916,6 +958,12 @@
 static void InitializeFlags() {
   SetCommonFlagsDefaults();
+  {
+    CommonFlags cf;
+    cf.CopyFrom(*common_flags());
+    cf.intercept_tls_get_addr = true;
+    OverrideCommonFlags(cf);
+  }
   flags().SetDefaults();
 
   FlagParser parser;
@@ -981,7 +1029,13 @@
   Die();
 }
 
-static void dfsan_init(int argc, char **argv, char **envp) {
+static void DFsanInit(int argc, char **argv, char **envp) {
+  CHECK(!dfsan_init_is_running);
+  if (dfsan_inited)
+    return;
+  dfsan_init_is_running = true;
+  SanitizerToolName = "DataflowSanitizer";
+
   InitializeFlags();
 
   ::InitializePlatformEarly();
@@ -995,11 +1049,11 @@
   // will load our executable in the middle of our unused region. This mostly
   // works so long as the program doesn't use too much memory. We support this
   // case by disabling memory protection when ASLR is disabled.
-  uptr init_addr = (uptr)&dfsan_init;
+  uptr init_addr = (uptr)&DFsanInit;
   if (!(init_addr >= UnusedAddr() && init_addr < AppAddr()))
     MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr());
 
-  InitializeInterceptors();
+  initialize_interceptors();
 
   // Register the fini callback to run when the program terminates successfully
   // or it is killed by the runtime.
@@ -1008,14 +1062,27 @@
 
   // Set up threads
   DFsanTSDInit(DFsanTSDDtor);
+
+  dfsan_allocator_init();
+
   DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr);
   SetCurrentThread(main_thread);
   main_thread->ThreadStart();
 
   __dfsan_label_info[kInitializingLabel].desc = "<init label>";
+
+  dfsan_init_is_running = false;
+  dfsan_inited = 1;
 }
 
+namespace __dfsan {
+
+void dfsan_init() { DFsanInit(0, nullptr, nullptr); }
+
+} // namespace __dfsan
+
 #if SANITIZER_CAN_USE_PREINIT_ARRAY
-__attribute__((section(".preinit_array"), used))
-static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init;
+__attribute__((section(".preinit_array"),
+               used)) static void (*dfsan_init_ptr)(int, char **,
+                                                    char **) = DFsanInit;
 #endif
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.h b/compiler-rt/lib/dfsan/dfsan_allocator.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.h
@@ -0,0 +1,30 @@
+//===-- dfsan_allocator.h ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef DFSAN_ALLOCATOR_H
+#define DFSAN_ALLOCATOR_H
+
+#include "sanitizer_common/sanitizer_common.h"
+
+namespace __dfsan {
+
+struct DFsanThreadLocalMallocStorage {
+  ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque.
+  void CommitBack();
+
+ private:
+  // These objects are allocated via mmap() and are zero-initialized.
+  DFsanThreadLocalMallocStorage() {}
+};
+
+} // namespace __dfsan
+#endif // DFSAN_ALLOCATOR_H
diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp
@@ -0,0 +1,293 @@
+//===-- dfsan_allocator.cpp -------------------------- --------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+// DataflowSanitizer allocator.
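+//
+// It wraps sanitizer_common's combined allocator, keeps each chunk's
+// requested size in per-chunk metadata, and zeroes shadow memory on
+// allocation and deallocation (see the zero_in_malloc and zero_in_free
+// flags below) so that reused heap memory never carries stale labels.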
+//===----------------------------------------------------------------------===//
+
+#include "dfsan_allocator.h"
+
+#include "dfsan.h"
+#include "dfsan_thread.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_checks.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+#include "sanitizer_common/sanitizer_errno.h"
+
+namespace __dfsan {
+
+struct Metadata {
+  uptr requested_size;
+};
+
+struct DFsanMapUnmapCallback {
+  void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); }
+  void OnUnmap(uptr p, uptr size) const {
+    __dfsan::dfsan_release_meta_memory((const void *)p, size);
+  }
+};
+
+// TODO: DFSan assumes application memory starts from 0x700000008000. For
+// unknown reason, the sanitizer allocator does not support any start address
+// between 0x701000000000 and 0x700000008000. After switching to fast8labels
+// mode, DFSan's memory layout will be changed to match MSan's. Then we can
+// set the start address to 0x700000000000, as MSan does.
+static const uptr kAllocatorSpace = 0x701000000000ULL;
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+
+struct AP64 { // Allocator64 parameters. Deliberately using a short name.
+  static const uptr kSpaceBeg = kAllocatorSpace;
+  static const uptr kSpaceSize = 0x40000000000; // 4T.
+  static const uptr kMetadataSize = sizeof(Metadata);
+  typedef DefaultSizeClassMap SizeClassMap;
+  typedef DFsanMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags = 0;
+  using AddressSpaceView = LocalAddressSpaceView;
+};
+
+typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+
+typedef CombinedAllocator<PrimaryAllocator> Allocator;
+typedef Allocator::AllocatorCache AllocatorCache;
+
+static Allocator allocator;
+static AllocatorCache fallback_allocator_cache;
+static StaticSpinMutex fallback_mutex;
+
+static uptr max_malloc_size;
+
+void dfsan_allocator_init() {
+  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
+  allocator.Init(common_flags()->allocator_release_to_os_interval_ms);
+  if (common_flags()->max_allocation_size_mb)
+    max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20,
+                          kMaxAllowedMallocSize);
+  else
+    max_malloc_size = kMaxAllowedMallocSize;
+}
+
+AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) {
+  CHECK(ms);
+  CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache));
+  return reinterpret_cast<AllocatorCache *>(ms->allocator_cache);
+}
+
+void DFsanThreadLocalMallocStorage::CommitBack() {
+  allocator.SwallowCache(GetAllocatorCache(this));
+}
+
+static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) {
+  if (size > max_malloc_size) {
+    if (AllocatorMayReturnNull()) {
+      Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n",
+             size);
+      return nullptr;
+    }
+    BufferedStackTrace stack;
+    ReportAllocationSizeTooBig(size, max_malloc_size, &stack);
+  }
+  DFsanThread *t = GetCurrentThread();
+  void *allocated;
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocated = allocator.Allocate(cache, size, alignment);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocated = allocator.Allocate(cache, size, alignment);
+  }
+  if (UNLIKELY(!allocated)) {
+    SetAllocatorOutOfMemory();
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportOutOfMemory(size, &stack);
+  }
+  Metadata *meta =
+      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
+  meta->requested_size = size;
+  if (zeroise) {
+    internal_memset(allocated, 0, size);
+    dfsan_set_label(0, allocated, size);
+  } else if (flags().zero_in_malloc) {
+    dfsan_set_label(0, allocated, size);
+  }
+  return allocated;
+}
+
+void dfsan_deallocate(void *p) {
+  CHECK(p);
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p));
+  uptr size = meta->requested_size;
+  meta->requested_size = 0;
+  if (flags().zero_in_free)
+    dfsan_set_label(0, p, size);
+  DFsanThread *t = GetCurrentThread();
+  if (t) {
+    AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage());
+    allocator.Deallocate(cache, p);
+  } else {
+    SpinMutexLock l(&fallback_mutex);
+    AllocatorCache *cache = &fallback_allocator_cache;
+    allocator.Deallocate(cache, p);
+  }
+}
+
+void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) {
+  Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p));
+  uptr old_size = meta->requested_size;
+  uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p);
+  if (new_size <= actually_allocated_size) {
+    // We are not reallocating here.
+    meta->requested_size = new_size;
+    if (new_size > old_size && flags().zero_in_malloc)
+      dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size);
+    return old_p;
+  }
+  uptr memcpy_size = Min(new_size, old_size);
+  void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/);
+  if (new_p) {
+    dfsan_copy_memory(new_p, old_p, memcpy_size);
+    dfsan_deallocate(old_p);
+  }
+  return new_p;
+}
+
+void *DFsanCalloc(uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportCallocOverflow(nmemb, size, &stack);
+  }
+  return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/);
+}
+
+static uptr AllocationSize(const void *p) {
+  if (!p)
+    return 0;
+  const void *beg = allocator.GetBlockBegin(p);
+  if (beg != p)
+    return 0;
+  Metadata *b = (Metadata *)allocator.GetMetaData(p);
+  return b->requested_size;
+}
+
+void *dfsan_malloc(uptr size) {
+  return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+}
+
+void *dfsan_calloc(uptr nmemb, uptr size) {
+  return SetErrnoOnNull(DFsanCalloc(nmemb, size));
+}
+
+void *dfsan_realloc(void *ptr, uptr size) {
+  if (!ptr)
+    return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/));
+  if (size == 0) {
+    dfsan_deallocate(ptr);
+    return nullptr;
+  }
+  return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64)));
+}
+
+void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) {
+  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportReallocArrayOverflow(nmemb, size, &stack);
+  }
+  return dfsan_realloc(ptr, nmemb * size);
+}
+
+void *dfsan_valloc(uptr size) {
+  return SetErrnoOnNull(
+      DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/));
+}
+
+void *dfsan_pvalloc(uptr size) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportPvallocOverflow(size, &stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ? RoundUpTo(size, PageSize) : PageSize;
+  return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/));
+}
+
+void *dfsan_aligned_alloc(uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
+  }
+  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+void *dfsan_memalign(uptr alignment, uptr size) {
+  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
+    errno = errno_EINVAL;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    BufferedStackTrace stack;
+    ReportInvalidAllocationAlignment(alignment, &stack);
+  }
+  return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/));
+}
+
+int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) {
+  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
+    if (AllocatorMayReturnNull())
+      return errno_EINVAL;
+    BufferedStackTrace stack;
+    ReportInvalidPosixMemalignAlignment(alignment, &stack);
+  }
+  void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/);
+  if (UNLIKELY(!ptr))
+    // OOM error is already taken care of by DFsanAllocate.
+    return errno_ENOMEM;
+  CHECK(IsAligned((uptr)ptr, alignment));
+  *memptr = ptr;
+  return 0;
+}
+
+} // namespace __dfsan
+
+using namespace __dfsan;
+
+uptr __sanitizer_get_current_allocated_bytes() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatAllocated];
+}
+
+uptr __sanitizer_get_heap_size() {
+  uptr stats[AllocatorStatCount];
+  allocator.GetStats(stats);
+  return stats[AllocatorStatMapped];
+}
+
+uptr __sanitizer_get_free_bytes() { return 1; }
+
+uptr __sanitizer_get_unmapped_bytes() { return 1; }
+
+uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }
+
+int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; }
+
+uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); }
diff --git a/compiler-rt/lib/dfsan/dfsan_custom.cpp b/compiler-rt/lib/dfsan/dfsan_custom.cpp
--- a/compiler-rt/lib/dfsan/dfsan_custom.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_custom.cpp
@@ -460,24 +460,6 @@
   return r;
 }
 
-SANITIZER_INTERFACE_ATTRIBUTE void *__dfsw_calloc(size_t nmemb, size_t size,
-                                                  dfsan_label nmemb_label,
-                                                  dfsan_label size_label,
-                                                  dfsan_label *ret_label) {
-  void *p = calloc(nmemb, size);
-  dfsan_set_label(0, p, nmemb * size);
-  *ret_label = 0;
-  return p;
-}
-
-SANITIZER_INTERFACE_ATTRIBUTE void *__dfso_calloc(
-    size_t nmemb, size_t size, dfsan_label nmemb_label, dfsan_label size_label,
-    dfsan_label *ret_label, dfsan_origin nmemb_origin, dfsan_origin size_origin,
-    dfsan_origin *ret_origin) {
-  void *p = __dfsw_calloc(nmemb, size, nmemb_label, size_label, ret_label);
-  *ret_origin = 0;
-  return p;
-}
 
 SANITIZER_INTERFACE_ATTRIBUTE size_t
 __dfsw_strlen(const char *s, dfsan_label s_label, dfsan_label *ret_label) {
diff --git a/compiler-rt/lib/dfsan/dfsan_flags.inc b/compiler-rt/lib/dfsan/dfsan_flags.inc
--- a/compiler-rt/lib/dfsan/dfsan_flags.inc
+++ b/compiler-rt/lib/dfsan/dfsan_flags.inc
@@ -40,3 +40,7 @@
            "The depth limit of origin tracking stack traces.")
 DFSAN_FLAG(bool, check_origin_invariant, false,
            "Whether to check if the origin invariant holds.")
+DFSAN_FLAG(bool, zero_in_malloc, true,
+           "Whether to zero shadow space of newly allocated memory.")
+DFSAN_FLAG(bool, zero_in_free, true,
+           "Whether to zero shadow space of deallocated memory.")
diff --git a/compiler-rt/lib/dfsan/dfsan_interceptors.cpp b/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
--- a/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_interceptors.cpp
@@ -15,8 +15,14 @@
 #include <unistd.h>
 
 #include "dfsan/dfsan.h"
+#include "dfsan/dfsan_thread.h"
 #include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_errno.h"
+#include "sanitizer_common/sanitizer_platform_limits_posix.h"
+#include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 using namespace __sanitizer;
 
@@ -26,44 +32,225 @@
 } // namespace
 
-INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
-            int fd, OFF_T offset) {
-  void *res;
+INTERCEPTOR(void *, reallocarray, void *ptr, SIZE_T nmemb, SIZE_T size) {
+  return __dfsan::dfsan_reallocarray(ptr, nmemb, size);
+}
+
+INTERCEPTOR(void *, __libc_memalign, SIZE_T alignment, SIZE_T size) {
+  void *ptr = __dfsan::dfsan_memalign(alignment, size);
+  if (ptr)
+    DTLS_on_libc_memalign(ptr, size);
+  return ptr;
+}
+
+INTERCEPTOR(void *, aligned_alloc, SIZE_T alignment, SIZE_T size) {
+  return __dfsan::dfsan_aligned_alloc(alignment, size);
+}
+
+static uptr allocated_for_dlsym;
+static const uptr kDlsymAllocPoolSize = 1024;
+static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize];
+
+static bool IsInDlsymAllocPool(const void *ptr) {
+  uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+  return off < sizeof(alloc_memory_for_dlsym);
+}
+
+static void *AllocateFromLocalPool(uptr size_in_bytes) {
+  uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize;
+  void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym];
+  allocated_for_dlsym += size_in_words;
+  CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize);
+  return mem;
+}
+
+INTERCEPTOR(void *, calloc, SIZE_T nmemb, SIZE_T size) {
+  if (UNLIKELY(!__dfsan::dfsan_inited))
+    // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym.
+    return AllocateFromLocalPool(nmemb * size);
+  return __dfsan::dfsan_calloc(nmemb, size);
+}
+
+INTERCEPTOR(void *, realloc, void *ptr, SIZE_T size) {
+  if (UNLIKELY(IsInDlsymAllocPool(ptr))) {
+    uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym;
+    uptr copy_size = Min(size, kDlsymAllocPoolSize - offset);
+    void *new_ptr;
+    if (UNLIKELY(!__dfsan::dfsan_inited)) {
+      new_ptr = AllocateFromLocalPool(copy_size);
+    } else {
+      copy_size = size;
+      new_ptr = __dfsan::dfsan_malloc(copy_size);
+    }
+    internal_memcpy(new_ptr, ptr, copy_size);
+    return new_ptr;
+  }
+  return __dfsan::dfsan_realloc(ptr, size);
+}
+
+INTERCEPTOR(void *, malloc, SIZE_T size) {
+  if (UNLIKELY(!__dfsan::dfsan_inited))
+    // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym.
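+    // free() ignores chunks from this static pool and realloc() copies them
+    // out (see IsInDlsymAllocPool above), so the pool needs no deallocation
+    // support; only a few small bootstrap allocations ever land here.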
+    return AllocateFromLocalPool(size);
+  return __dfsan::dfsan_malloc(size);
+}
+
+INTERCEPTOR(void, free, void *ptr) {
+  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+    return;
+  return __dfsan::dfsan_deallocate(ptr);
+}
+
+INTERCEPTOR(void, cfree, void *ptr) {
+  if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr)))
+    return;
+  return __dfsan::dfsan_deallocate(ptr);
+}
+
+INTERCEPTOR(int, posix_memalign, void **memptr, SIZE_T alignment, SIZE_T size) {
+  CHECK_NE(memptr, 0);
+  int res = __dfsan::dfsan_posix_memalign(memptr, alignment, size);
+  if (!res)
+    dfsan_set_label(0, memptr, sizeof(*memptr));
+  return res;
+}
+
+INTERCEPTOR(void *, memalign, SIZE_T alignment, SIZE_T size) {
+  return __dfsan::dfsan_memalign(alignment, size);
+}
+
+INTERCEPTOR(void *, valloc, SIZE_T size) { return __dfsan::dfsan_valloc(size); }
+
+INTERCEPTOR(void *, pvalloc, SIZE_T size) {
+  return __dfsan::dfsan_pvalloc(size);
+}
+
+INTERCEPTOR(void, mallinfo, __sanitizer_struct_mallinfo *sret) {
+  internal_memset(sret, 0, sizeof(*sret));
+  dfsan_set_label(0, sret, sizeof(*sret));
+}
 
-  // interceptors_initialized is set to true during preinit_array, when we're
-  // single-threaded. So we don't need to worry about accessing it atomically.
-  if (!interceptors_initialized)
-    res = (void *)syscall(__NR_mmap, addr, length, prot, flags, fd, offset);
-  else
-    res = REAL(mmap)(addr, length, prot, flags, fd, offset);
 
+INTERCEPTOR(int, mallopt, int cmd, int value) { return 0; }
 
-  if (res != (void *)-1)
+INTERCEPTOR(void, malloc_stats, void) {
+  // FIXME: implement, but don't call REAL(malloc_stats)!
+}
+
+INTERCEPTOR(uptr, malloc_usable_size, void *ptr) {
+  return __sanitizer_get_allocated_size(ptr);
+}
+
+#define ENSURE_DFSAN_INITED()               \
+  do {                                      \
+    CHECK(!__dfsan::dfsan_init_is_running); \
+    if (!__dfsan::dfsan_inited) {           \
+      __dfsan::dfsan_init();                \
+    }                                       \
+  } while (0)
+
+struct DFSanInterceptorContext {};
+
+#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)        \
+  if (__dfsan::dfsan_init_is_running)                   \
+    return REAL(func)(__VA_ARGS__);                     \
+  ENSURE_DFSAN_INITED();                                \
+  DFSanInterceptorContext dfsan_ctx = {};               \
+  ctx = (void *)&dfsan_ctx;                             \
+  (void)ctx;                                            \
+  dfsan_set_label(0, __errno_location(), sizeof(int)); /* NOLINT */
+
+#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED (!__dfsan::dfsan_inited)
+
+INTERCEPTOR(void *, mmap, void *addr, SIZE_T length, int prot, int flags,
+            int fd, OFF_T offset) {
+  void *ctx;
+  if (common_flags()->detect_write_exec)
+    ReportMmapWriteExec(prot);
+  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+    return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
+  COMMON_INTERCEPTOR_ENTER(ctx, mmap, addr, length, prot, flags, fd, offset);
+  void *res = REAL(mmap)(addr, length, prot, flags, fd, offset);
+  if (res != (void *)-1) {
     dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
+  }
   return res;
 }
 
 INTERCEPTOR(void *, mmap64, void *addr, SIZE_T length, int prot, int flags,
             int fd, OFF64_T offset) {
+  void *ctx;
+  if (common_flags()->detect_write_exec)
+    ReportMmapWriteExec(prot);
+  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+    return (void *)internal_mmap(addr, length, prot, flags, fd, offset);
+  COMMON_INTERCEPTOR_ENTER(ctx, mmap64, addr, length, prot, flags, fd, offset);
   void *res = REAL(mmap64)(addr, length, prot, flags, fd, offset);
-  if (res != (void *)-1)
+  if (res != (void *)-1) {
     dfsan_set_label(0, res, RoundUpTo(length, GetPageSizeCached()));
+  }
   return res;
 }
 
 INTERCEPTOR(int, munmap, void *addr, SIZE_T length) {
+  void *ctx;
+  if (COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED)
+    return internal_munmap(addr, length);
+  COMMON_INTERCEPTOR_ENTER(ctx, munmap, addr, length);
   int res = REAL(munmap)(addr, length);
   if (res != -1)
-    dfsan_set_label(0, addr, RoundUpTo(length, GetPageSizeCached()));
+    __dfsan::dfsan_release_meta_memory(addr,
+                                       RoundUpTo(length, GetPageSizeCached()));
   return res;
 }
 
+#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end)           \
+  if (__dfsan::DFsanThread *t = __dfsan::GetCurrentThread()) { \
+    *begin = t->tls_begin();                                   \
+    *end = t->tls_end();                                       \
+  } else {                                                     \
+    *begin = *end = 0;                                         \
+  }
+#define COMMON_INTERCEPTOR_INITIALIZE_RANGE(ptr, size) \
+  dfsan_set_label(0, ptr, size)
+
+INTERCEPTOR(void *, __tls_get_addr, void *arg) {
+  void *ctx;
+  COMMON_INTERCEPTOR_ENTER(ctx, __tls_get_addr, arg);
+  void *res = REAL(__tls_get_addr)(arg);
+  uptr tls_begin, tls_end;
+  COMMON_INTERCEPTOR_GET_TLS_RANGE(&tls_begin, &tls_end);
+  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, tls_begin, tls_end);
+  if (dtv) {
+    // New DTLS block has been allocated.
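+    // Its shadow may still hold labels left by a previous mapping at these
+    // addresses, so clear it before the application reads from the block.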
+    COMMON_INTERCEPTOR_INITIALIZE_RANGE((void *)dtv->beg, dtv->size);
+  }
   return res;
 }
 
 namespace __dfsan {
 
-void InitializeInterceptors() {
+void initialize_interceptors() {
   CHECK(!interceptors_initialized);
 
+  INTERCEPT_FUNCTION(aligned_alloc);
+  INTERCEPT_FUNCTION(calloc);
+  INTERCEPT_FUNCTION(cfree);
+  INTERCEPT_FUNCTION(free);
+  INTERCEPT_FUNCTION(mallinfo);
+  INTERCEPT_FUNCTION(malloc);
+  INTERCEPT_FUNCTION(malloc_stats);
+  INTERCEPT_FUNCTION(malloc_usable_size);
+  INTERCEPT_FUNCTION(mallopt);
+  INTERCEPT_FUNCTION(memalign);
   INTERCEPT_FUNCTION(mmap);
   INTERCEPT_FUNCTION(mmap64);
   INTERCEPT_FUNCTION(munmap);
+  INTERCEPT_FUNCTION(posix_memalign);
+  INTERCEPT_FUNCTION(pvalloc);
+  INTERCEPT_FUNCTION(realloc);
+  INTERCEPT_FUNCTION(reallocarray);
+  INTERCEPT_FUNCTION(valloc);
+  INTERCEPT_FUNCTION(__tls_get_addr);
+  INTERCEPT_FUNCTION(__libc_memalign);
 
   interceptors_initialized = true;
 }
diff --git a/compiler-rt/lib/dfsan/dfsan_new_delete.cpp b/compiler-rt/lib/dfsan/dfsan_new_delete.cpp
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/dfsan/dfsan_new_delete.cpp
@@ -0,0 +1,129 @@
+//===-- dfsan_new_delete.cpp ----------------------------------------------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file is a part of DataflowSanitizer.
+//
+// Interceptors for operators new and delete.
+//===----------------------------------------------------------------------===//
+
+#include "dfsan.h"
+#include "interception/interception.h"
+#include "sanitizer_common/sanitizer_allocator.h"
+#include "sanitizer_common/sanitizer_allocator_report.h"
+
+#if DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE
+
+#include <stddef.h>
+
+using namespace __dfsan;
+
+// Fake std::nothrow_t and std::align_val_t to avoid including <new>.
+namespace std {
+struct nothrow_t {};
+enum class align_val_t : size_t {};
+} // namespace std
+
+// TODO(alekseys): throw std::bad_alloc instead of dying on OOM.
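+// The nothrow parameter mirrors C++ semantics: the throwing operator new
+// overloads must not return null, so they report and die on failure (see the
+// TODO above), while the nothrow overloads simply return null to the caller.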
+#define OPERATOR_NEW_BODY(nothrow)   \
+  void *res = dfsan_malloc(size);    \
+  if (!nothrow && UNLIKELY(!res)) {  \
+    BufferedStackTrace stack;        \
+    ReportOutOfMemory(size, &stack); \
+  }                                  \
+  return res
+#define OPERATOR_NEW_BODY_ALIGN(nothrow)         \
+  void *res = dfsan_memalign((uptr)align, size); \
+  if (!nothrow && UNLIKELY(!res)) {              \
+    BufferedStackTrace stack;                    \
+    ReportOutOfMemory(size, &stack);             \
+  }                                              \
+  return res;
+
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); }
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::nothrow_t const &) {
+  OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::nothrow_t const &) {
+  OPERATOR_NEW_BODY(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align) {
+  OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align) {
+  OPERATOR_NEW_BODY_ALIGN(false /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new(size_t size, std::align_val_t align,
+                   std::nothrow_t const &) {
+  OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/);
+}
+INTERCEPTOR_ATTRIBUTE
+void *operator new[](size_t size, std::align_val_t align,
+                     std::nothrow_t const &) {
+  OPERATOR_NEW_BODY_ALIGN(true /*nothrow*/);
+}
+
+#define OPERATOR_DELETE_BODY \
+  if (ptr)                   \
+  dfsan_deallocate(ptr)
+
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr)NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size)NOEXCEPT { OPERATOR_DELETE_BODY; }
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align)NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, std::align_val_t align,
+                     std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, std::align_val_t align,
+                       std::nothrow_t const &) {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete(void *ptr, size_t size, std::align_val_t align)NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+INTERCEPTOR_ATTRIBUTE
+void operator delete[](void *ptr, size_t size,
+                       std::align_val_t align) NOEXCEPT {
+  OPERATOR_DELETE_BODY;
+}
+
+#endif // DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.h b/compiler-rt/lib/dfsan/dfsan_thread.h
--- a/compiler-rt/lib/dfsan/dfsan_thread.h
+++ b/compiler-rt/lib/dfsan/dfsan_thread.h
@@ -14,6 +14,7 @@
 #ifndef DFSAN_THREAD_H
 #define DFSAN_THREAD_H
 
+#include "dfsan_allocator.h"
 #include "sanitizer_common/sanitizer_common.h"
 
 namespace __dfsan {
@@ -34,16 +35,21 @@
   uptr stack_top();
   uptr stack_bottom();
+  uptr tls_begin() { return tls_begin_; }
+  uptr tls_end() { return tls_end_; }
   bool IsMainThread() { return start_routine_ == nullptr; }
 
   bool InSignalHandler() { return in_signal_handler_; }
   void EnterSignalHandler() { in_signal_handler_++; }
   void LeaveSignalHandler() { in_signal_handler_--; }
 
+  DFsanThreadLocalMallocStorage &malloc_storage() { return malloc_storage_; }
+
   int destructor_iterations_;
 
  private:
   void SetThreadStackAndTls();
+  void ClearShadowForThreadStackAndTLS();
   struct StackBounds {
     uptr bottom;
     uptr top;
@@ -59,7 +65,12 @@
 
   StackBounds stack_;
 
+  uptr tls_begin_;
+  uptr tls_end_;
+
   unsigned in_signal_handler_;
+
+  DFsanThreadLocalMallocStorage malloc_storage_;
 };
 
 DFsanThread *GetCurrentThread();
diff --git a/compiler-rt/lib/dfsan/dfsan_thread.cpp b/compiler-rt/lib/dfsan/dfsan_thread.cpp
--- a/compiler-rt/lib/dfsan/dfsan_thread.cpp
+++ b/compiler-rt/lib/dfsan/dfsan_thread.cpp
@@ -3,6 +3,7 @@
 #include <pthread.h>
 
 #include "dfsan.h"
+#include "sanitizer_common/sanitizer_tls_get_addr.h"
 
 namespace __dfsan {
 
@@ -24,16 +25,30 @@
 void DFsanThread::SetThreadStackAndTls() {
   uptr tls_size = 0;
   uptr stack_size = 0;
-  uptr tls_begin;
-  GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin,
+  GetThreadStackAndTls(IsMainThread(), &stack_.bottom, &stack_size, &tls_begin_,
                        &tls_size);
   stack_.top = stack_.bottom + stack_size;
+  tls_end_ = tls_begin_ + tls_size;
 
   int local;
   CHECK(AddrIsInStack((uptr)&local));
 }
 
-void DFsanThread::Init() { SetThreadStackAndTls(); }
+void DFsanThread::ClearShadowForThreadStackAndTLS() {
+  dfsan_set_label(0, (void *)stack_.bottom, stack_.top - stack_.bottom);
+  if (tls_begin_ != tls_end_)
+    dfsan_set_label(0, (void *)tls_begin_, tls_end_ - tls_begin_);
+  DTLS *dtls = DTLS_Get();
+  CHECK_NE(dtls, 0);
+  ForEachDVT(dtls, [](const DTLS::DTV &dtv, int id) {
+    dfsan_set_label(0, (void *)(dtv.beg), dtv.size);
+  });
+}
+
+void DFsanThread::Init() {
+  SetThreadStackAndTls();
+  ClearShadowForThreadStackAndTLS();
+}
 
 void DFsanThread::TSDDtor(void *tsd) {
   DFsanThread *t = (DFsanThread *)tsd;
@@ -41,8 +56,14 @@
 }
 
 void DFsanThread::Destroy() {
+  malloc_storage().CommitBack();
+  // We also clear the shadow on thread destruction because
+  // some code may still be executing in later TSD destructors
+  // and we don't want it to have any poisoned stack.
+  ClearShadowForThreadStackAndTLS();
   uptr size = RoundUpTo(sizeof(DFsanThread), GetPageSizeCached());
   UnmapOrDie(this, size);
+  DTLS_Destroy();
 }
 
 thread_return_t DFsanThread::ThreadStart() {
diff --git a/compiler-rt/lib/dfsan/done_abilist.txt b/compiler-rt/lib/dfsan/done_abilist.txt
--- a/compiler-rt/lib/dfsan/done_abilist.txt
+++ b/compiler-rt/lib/dfsan/done_abilist.txt
@@ -39,8 +39,23 @@
 ###############################################################################
 # glibc
 ###############################################################################
+# Functions of memory allocators
+fun:__libc_memalign=discard
+fun:aligned_alloc=discard
+fun:calloc=discard
+fun:cfree=discard
+fun:mallinfo=discard
 fun:malloc=discard
 fun:free=discard
+fun:malloc_stats=discard
+fun:malloc_usable_size=discard
+fun:mallopt=discard
+fun:memalign=discard
+fun:posix_memalign=discard
+fun:pvalloc=discard
+fun:realloc=discard
+fun:reallocarray=discard
+fun:valloc=discard
 
 # Functions that return a value that depends on the input, but the output might
 # not be necessarily data-dependent on the input.
@@ -155,7 +170,6 @@
 fun:openat=discard
 fun:pipe=discard
 fun:posix_fadvise=discard
-fun:posix_memalign=discard
 fun:prctl=discard
 fun:printf=discard
 fun:pthread_sigmask=discard
@@ -191,7 +205,6 @@
 # Functions that produce output does not depend on the input (need to zero the
 # shadow manually).
 fun:_dl_get_tls_static_info=custom
-fun:calloc=custom
 fun:clock_gettime=custom
 fun:dlopen=custom
 fun:epoll_wait=custom
@@ -399,6 +412,36 @@
 fun:__sanitizer_cov_pcs_init=uninstrumented
 fun:__sanitizer_cov_pcs_init=discard
 
+fun:__sanitizer_get_current_allocated_bytes=uninstrumented
+fun:__sanitizer_get_current_allocated_bytes=discard
+fun:__sanitizer_get_heap_size=uninstrumented
+fun:__sanitizer_get_heap_size=discard
+fun:__sanitizer_get_free_bytes=uninstrumented
+fun:__sanitizer_get_free_bytes=discard
+fun:__sanitizer_get_unmapped_bytes=uninstrumented
+fun:__sanitizer_get_unmapped_bytes=discard
+fun:__sanitizer_get_estimated_allocated_size=uninstrumented
+fun:__sanitizer_get_estimated_allocated_size=discard
+fun:__sanitizer_get_ownership=uninstrumented
+fun:__sanitizer_get_ownership=discard
+fun:__sanitizer_get_allocated_size=uninstrumented
+fun:__sanitizer_get_allocated_size=discard
+fun:__sanitizer_print_stack_trace=uninstrumented
+fun:__sanitizer_print_stack_trace=discard
+
+fun:TcmallocSlab_Internal_PushBatch_FixedShift=uninstrumented
+fun:TcmallocSlab_Internal_PushBatch_FixedShift=discard
+fun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PushBatch_FixedShift_VCPU=discard
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64=uninstrumented
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64=discard
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PerCpuCmpxchg64_VCPU=discard
+fun:TcmallocSlab_Internal_PopBatch_FixedShift=uninstrumented
+fun:TcmallocSlab_Internal_PopBatch_FixedShift=discard
+fun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=uninstrumented
+fun:TcmallocSlab_Internal_PopBatch_FixedShift_VCPU=discard
+
 # Ignores the dfsan wrappers.
 fun:__dfsw_*=uninstrumented
 fun:__dfsw_*=discard
diff --git a/compiler-rt/test/dfsan/interceptors.c b/compiler-rt/test/dfsan/interceptors.c
--- a/compiler-rt/test/dfsan/interceptors.c
+++ b/compiler-rt/test/dfsan/interceptors.c
@@ -1,32 +1,182 @@
-// RUN: %clang_dfsan -fno-sanitize=dataflow -DCALLOC -c %s -o %t-calloc.o
-// RUN: %clang_dfsan %s %t-calloc.o -o %t
-// RUN: %run %t
+// RUN: %clang_dfsan -mllvm -dfsan-fast-16-labels -mllvm -dfsan-combine-pointer-labels-on-load=false %s -o %t && %run %t
+// RUN: %clang_dfsan -DORIGIN_TRACKING -mllvm -dfsan-fast-16-labels -mllvm -dfsan-track-origins=1 -mllvm -dfsan-combine-pointer-labels-on-load=false %s -o %t && %run %t
 //
-// Tests that calling mmap() during during dfsan initialization works.
+// Tests custom implementations of various glibc functions.
+//
+// REQUIRES: x86_64-target-arch
 
-#include <assert.h>
 #include <sanitizer/dfsan_interface.h>
-#include <stdlib.h>
+
+#include <assert.h>
+#include <malloc.h>
+#include <stdlib.h>
 #include <sys/mman.h>
-#include <unistd.h>
 
-#ifdef CALLOC
+#define ASSERT_ZERO_LABEL(data) \
+  assert(0 == dfsan_get_label((long) (data)))
+
+#define ASSERT_READ_ZERO_LABEL(ptr, size) \
+  assert(0 == dfsan_read_label(ptr, size))
+
+const int kAlignment = 8;
+const int kSize = 16;
+
+void test_aligned_alloc() {
+  char *p = (char *) aligned_alloc(kAlignment, kSize);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test_calloc() {
+  char *p = (char *) calloc(kSize, 1);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test_cfree() {
+  // The current glibc does not support cfree.
+}
+
+void test_free() {
+  char *p = (char *) malloc(kSize);
+  dfsan_set_label(1, p, kSize);
+  free(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+}
+
+void test_mallinfo() {
+  struct mallinfo mi = mallinfo();
+  for (int i = 0; i < sizeof(struct mallinfo); ++i) {
+    char c = ((char *)(&mi))[i];
+    assert(!c);
+    ASSERT_ZERO_LABEL(c);
+  }
+}
+
+void test_malloc() {
+  char *p = (char *) malloc(kSize);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test_malloc_stats() {
+  // Only ensures it does not crash. Our interceptor of malloc_stats is empty.
+  malloc_stats();
+}
+
+void test_malloc_usable_size() {
+  char *p = (char *) malloc(kSize);
+  size_t s = malloc_usable_size(p);
+  assert(s == kSize);
+  ASSERT_ZERO_LABEL(s);
+  free(p);
+}
+
+void test_mallopt() {
+  int r = mallopt(0, 0);
+  assert(!r);
+  ASSERT_ZERO_LABEL(r);
+}
+
+void test_memalign() {
+  char *p = (char *) memalign(kAlignment, kSize);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test_mmap() {
+  char *p = mmap(NULL, kSize, PROT_READ | PROT_WRITE,
+                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  munmap(p, kSize);
+}
+
+void test_mmap64() {
+  // The current glibc does not support mmap64.
+}
+
+void test_munmap() {
+  char *p = mmap(NULL, kSize, PROT_READ | PROT_WRITE,
+                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  munmap(p, kSize);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+}
 
-// dfsan_init() installs interceptors via dlysm(), which calls calloc().
-// Calling mmap() from here should work even if interceptors haven't been fully
-// set up yet.
-void *calloc(size_t Num, size_t Size) {
-  size_t PageSize = getpagesize();
-  Size = Size * Num;
-  Size = (Size + PageSize - 1) & ~(PageSize - 1); // Round up to PageSize.
-  void *Ret = mmap(NULL, Size, PROT_READ | PROT_WRITE,
-                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  assert(Ret != MAP_FAILED);
-  return Ret;
+void test_posix_memalign() {
+  char *p;
+  dfsan_set_label(1, &p, sizeof(p));
+  int r = posix_memalign((void **)&p, kAlignment, kSize);
+  assert(!r);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
 }
 
-#else
+void test_pvalloc() {
+  char *p = (char *) pvalloc(kSize);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test_realloc() {
+  char *p = (char *) malloc(kSize);
+
+  char *q = (char *) realloc(p, kSize * 2);
+  ASSERT_ZERO_LABEL(q);
+  ASSERT_READ_ZERO_LABEL(q, kSize * 2);
 
-int main() { return 0; }
+  char *x = (char *) realloc(q, kSize);
+  ASSERT_ZERO_LABEL(x);
+  ASSERT_READ_ZERO_LABEL(x, kSize);
 
-#endif // CALLOC
+  free(x);
+}
+
+void test_reallocarray() {
+  // The current glibc does not support reallocarray.
+}
+
+void test_valloc() {
+  char *p = (char *) valloc(kSize);
+  ASSERT_ZERO_LABEL(p);
+  ASSERT_READ_ZERO_LABEL(p, kSize);
+  free(p);
+}
+
+void test___libc_memalign() {
+  // The current glibc does not support __libc_memalign.
+}
+
+void test___tls_get_addr() {
+  // The current glibc does not support __tls_get_addr.
+}
+
+int main(void) {
+  // With any luck this sequence of calls will cause allocators to return the
+  // same pointer. This is probably the best we can do to test these functions.
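+  // (test_free, for instance, taints a chunk before freeing it; if a later
+  // test gets the same chunk back, its labels must already be zero.)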
+  test_aligned_alloc();
+  test_calloc();
+  test_cfree();
+  test_free();
+  test_mallinfo();
+  test_malloc();
+  test_malloc_stats();
+  test_malloc_usable_size();
+  test_mallopt();
+  test_memalign();
+  test_mmap();
+  test_mmap64();
+  test_munmap();
+  test_posix_memalign();
+  test_pvalloc();
+  test_realloc();
+  test_reallocarray();
+  test_valloc();
+  test___libc_memalign();
+  test___tls_get_addr();
+}
diff --git a/compiler-rt/test/dfsan/interceptors.c b/compiler-rt/test/dfsan/mmap_at_init.c
copy from compiler-rt/test/dfsan/interceptors.c
copy to compiler-rt/test/dfsan/mmap_at_init.c
--- a/compiler-rt/test/dfsan/interceptors.c
+++ b/compiler-rt/test/dfsan/mmap_at_init.c
@@ -1,5 +1,5 @@
 // RUN: %clang_dfsan -fno-sanitize=dataflow -DCALLOC -c %s -o %t-calloc.o
-// RUN: %clang_dfsan %s %t-calloc.o -o %t
+// RUN: %clang_dfsan %s %t-calloc.o -Wl,-z,notext -o %t
 // RUN: %run %t
 //
 // Tests that calling mmap() during during dfsan initialization works.
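
For illustration only (not part of the patch): a minimal user-level sketch of the behavior the new free() interceptor and the zero_in_free flag provide, mirroring test_free above. It assumes a dfsan-enabled clang; note that reading memory after free() is normally undefined behavior and is done here, as in the test, purely to observe the shadow.

// demo.c -- build: clang -fsanitize=dataflow demo.c && ./a.out
#include <sanitizer/dfsan_interface.h>

#include <assert.h>
#include <stdlib.h>

int main(void) {
  char *p = malloc(16);
  dfsan_set_label(1, p, 16);            // taint the live chunk
  assert(dfsan_read_label(p, 16) == 1); // labels are visible while allocated
  free(p);                              // interceptor zeroes the shadow (zero_in_free)
  assert(dfsan_read_label(p, 16) == 0); // no stale taint leaks into heap reuse
  return 0;
}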