diff --git a/compiler-rt/lib/dfsan/CMakeLists.txt b/compiler-rt/lib/dfsan/CMakeLists.txt --- a/compiler-rt/lib/dfsan/CMakeLists.txt +++ b/compiler-rt/lib/dfsan/CMakeLists.txt @@ -3,6 +3,7 @@ # Runtime library sources and build flags. set(DFSAN_RTL_SOURCES dfsan.cpp + dfsan_allocator.cpp dfsan_chained_origin_depot.cpp dfsan_custom.cpp dfsan_interceptors.cpp @@ -11,6 +12,7 @@ set(DFSAN_RTL_HEADERS dfsan.h + dfsan_allocator.h dfsan_chained_origin_depot.h dfsan_flags.inc dfsan_flags.h diff --git a/compiler-rt/lib/dfsan/dfsan.h b/compiler-rt/lib/dfsan/dfsan.h --- a/compiler-rt/lib/dfsan/dfsan.h +++ b/compiler-rt/lib/dfsan/dfsan.h @@ -19,6 +19,10 @@ #include "dfsan_flags.h" #include "dfsan_platform.h" +#ifndef DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE +#define DFSAN_REPLACE_OPERATORS_NEW_AND_DELETE 1 +#endif + using __sanitizer::u16; using __sanitizer::u32; using __sanitizer::uptr; @@ -63,7 +67,10 @@ namespace __dfsan { -void InitializeInterceptors(); +extern int dfsan_inited; +extern bool dfsan_init_is_running; + +void initialize_interceptors(); inline dfsan_label *shadow_for(void *ptr) { return (dfsan_label *) ((((uptr) ptr) & ShadowMask()) << 1); @@ -96,6 +103,24 @@ return is_shadow_addr_valid((uptr)ptr_s); } +void dfsan_copy_memory(void *dst, const void *src, uptr size); +void dfsan_release_meta_memory(const void *addr, uptr size); + +void dfsan_allocator_init(); +void dfsan_deallocate(void *ptr); + +void *dfsan_malloc(uptr size); +void *dfsan_calloc(uptr nmemb, uptr size); +void *dfsan_realloc(void *ptr, uptr size); +void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size); +void *dfsan_valloc(uptr size); +void *dfsan_pvalloc(uptr size); +void *dfsan_aligned_alloc(uptr alignment, uptr size); +void *dfsan_memalign(uptr alignment, uptr size); +int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size); + +void dfsan_init(); + } // namespace __dfsan #endif // DFSAN_H diff --git a/compiler-rt/lib/dfsan/dfsan.cpp b/compiler-rt/lib/dfsan/dfsan.cpp --- 
a/compiler-rt/lib/dfsan/dfsan.cpp +++ b/compiler-rt/lib/dfsan/dfsan.cpp @@ -21,7 +21,6 @@ #include "dfsan/dfsan.h" #include "dfsan/dfsan_chained_origin_depot.h" -#include "dfsan/dfsan_flags.h" #include "dfsan/dfsan_origin.h" #include "dfsan/dfsan_thread.h" #include "sanitizer_common/sanitizer_atomic.h" @@ -540,10 +539,18 @@ *(u32 *)(end - kOriginAlign) = origin; } -static void WriteShadowIfDifferent(dfsan_label label, uptr shadow_addr, - uptr size) { - dfsan_label *labelp = (dfsan_label *)shadow_addr; - for (; size != 0; --size, ++labelp) { +static void WriteShadowInRange(dfsan_label label, uptr beg_shadow_addr, + uptr end_shadow_addr) { + // TODO: After changing dfsan_label to 8bit, use internal_memset when label + // is not 0. + if (label) { + dfsan_label *labelp = (dfsan_label *)beg_shadow_addr; + for (; (uptr)labelp < end_shadow_addr; ++labelp) *labelp = label; + return; + } + + dfsan_label *labelp = (dfsan_label *)beg_shadow_addr; + for (; (uptr)labelp < end_shadow_addr; ++labelp) { // Don't write the label if it is already the value we need it to be. // In a program where most addresses are not labeled, it is common that // a page of shadow memory is entirely zeroed. The Linux copy-on-write @@ -552,13 +559,18 @@ // the value written does not change the value in memory. Avoiding the // write when both |label| and |*labelp| are zero dramatically reduces // the amount of real memory used by large programs. 
- if (label == *labelp) + if (!*labelp) continue; - *labelp = label; + *labelp = 0; } } +static void WriteShadowWithSize(dfsan_label label, uptr shadow_addr, + uptr size) { + WriteShadowInRange(label, shadow_addr, shadow_addr + size * sizeof(label)); +} + #define RET_CHAIN_ORIGIN(id) \ GET_CALLER_PC_BP_SP; \ (void)sp; \ @@ -597,6 +609,37 @@ __dfsan_mem_origin_transfer(dst, src, len); } +namespace __dfsan { + +int dfsan_inited = 0; +bool dfsan_init_is_running = false; + +void dfsan_copy_memory(void *dst, const void *src, uptr size) { + internal_memcpy(dst, src, size); + internal_memcpy((void *)shadow_for(dst), (const void *)shadow_for(src), + size * sizeof(dfsan_label)); + if (__dfsan_get_track_origins()) + dfsan_mem_origin_transfer(dst, src, size); +} + +void dfsan_release_meta_memory(const void *addr, uptr size) { + dfsan_set_label(0, (void *)addr, size); + + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr); + const void *end_addr = (void *)((uptr)addr + size); + const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr); + ReleaseMemoryPagesToOS(beg_shadow_addr, end_shadow_addr); + if (__dfsan_get_track_origins()) { + const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr); + const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr); + ReleaseMemoryPagesToOS(beg_origin_addr, end_origin_addr); + } +} + +} // namespace __dfsan + // If the label s is tainted, set the size bytes from the address p to be a new // origin chain with the previous ID o and the current stack trace. This is // used by instrumentation to reduce code size when too much code is inserted. @@ -610,63 +653,62 @@ } } -// Releases the pages within the origin address range, and sets the origin -// addresses not on the pages to be 0. -static void ReleaseOrClearOrigins(void *addr, uptr size) { +// Releases the pages within the origin address range. 
+static void ReleaseOrigins(void *addr, uptr size) { const uptr beg_origin_addr = (uptr)__dfsan::origin_for(addr); const void *end_addr = (void *)((uptr)addr + size); const uptr end_origin_addr = (uptr)__dfsan::origin_for(end_addr); + + if (end_origin_addr - beg_origin_addr < + common_flags()->clear_shadow_mmap_threshold) + return; + const uptr page_size = GetPageSizeCached(); const uptr beg_aligned = RoundUpTo(beg_origin_addr, page_size); const uptr end_aligned = RoundDownTo(end_origin_addr, page_size); - // dfsan_set_label can be called from the following cases - // 1) mapped ranges by new/delete and malloc/free. This case has origin memory - // size > 50k, and happens less frequently. - // 2) zero-filling internal data structures by utility libraries. This case - // has origin memory size < 16k, and happens more often. - // Set kNumPagesThreshold to be 4 to avoid releasing small pages. - const int kNumPagesThreshold = 4; - if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned) - return; - - ReleaseMemoryPagesToOS(beg_aligned, end_aligned); + if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned)) + Die(); } void SetShadow(dfsan_label label, void *addr, uptr size, dfsan_origin origin) { const uptr beg_shadow_addr = (uptr)__dfsan::shadow_for(addr); if (0 != label) { - WriteShadowIfDifferent(label, beg_shadow_addr, size); + WriteShadowWithSize(label, beg_shadow_addr, size); if (__dfsan_get_track_origins()) SetOrigin(addr, size, origin); return; } if (__dfsan_get_track_origins()) - ReleaseOrClearOrigins(addr, size); + ReleaseOrigins(addr, size); // If label is 0, releases the pages within the shadow address range, and sets // the shadow addresses not on the pages to be 0. 
const void *end_addr = (void *)((uptr)addr + size); const uptr end_shadow_addr = (uptr)__dfsan::shadow_for(end_addr); + + if (end_shadow_addr - beg_shadow_addr < + common_flags()->clear_shadow_mmap_threshold) + return WriteShadowWithSize(label, beg_shadow_addr, size); + const uptr page_size = GetPageSizeCached(); const uptr beg_aligned = RoundUpTo(beg_shadow_addr, page_size); const uptr end_aligned = RoundDownTo(end_shadow_addr, page_size); - // dfsan_set_label can be called from the following cases - // 1) mapped ranges by new/delete and malloc/free. This case has shadow memory - // size > 100k, and happens less frequently. - // 2) zero-filling internal data structures by utility libraries. This case - // has shadow memory size < 32k, and happens more often. - // Set kNumPagesThreshold to be 8 to avoid releasing small pages. - const int kNumPagesThreshold = 8; - if (beg_aligned + kNumPagesThreshold * page_size >= end_aligned) - return WriteShadowIfDifferent(label, beg_shadow_addr, size); - - WriteShadowIfDifferent(label, beg_shadow_addr, beg_aligned - beg_shadow_addr); + if (beg_aligned >= end_aligned) { + WriteShadowWithSize(0, beg_shadow_addr, size); + } else { + if (beg_aligned != beg_shadow_addr) + WriteShadowInRange(0, beg_shadow_addr, beg_aligned); + if (end_aligned != end_shadow_addr) + WriteShadowInRange(0, end_aligned, end_shadow_addr); + if (!MmapFixedSuperNoReserve(beg_aligned, end_aligned - beg_aligned)) + Die(); + } + ReleaseMemoryPagesToOS(beg_aligned, end_aligned); - WriteShadowIfDifferent(label, end_aligned, end_shadow_addr - end_aligned); } extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __dfsan_set_label( @@ -916,6 +958,12 @@ static void InitializeFlags() { SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.intercept_tls_get_addr = true; + OverrideCommonFlags(cf); + } flags().SetDefaults(); FlagParser parser; @@ -981,7 +1029,13 @@ Die(); } -static void dfsan_init(int argc, char **argv, char **envp) { +static void 
DFsanInit(int argc, char **argv, char **envp) { + CHECK(!dfsan_init_is_running); + if (dfsan_inited) + return; + dfsan_init_is_running = true; + SanitizerToolName = "DataflowSanitizer"; + InitializeFlags(); ::InitializePlatformEarly(); @@ -995,11 +1049,11 @@ // will load our executable in the middle of our unused region. This mostly // works so long as the program doesn't use too much memory. We support this // case by disabling memory protection when ASLR is disabled. - uptr init_addr = (uptr)&dfsan_init; + uptr init_addr = (uptr)&DFsanInit; if (!(init_addr >= UnusedAddr() && init_addr < AppAddr())) MmapFixedNoAccess(UnusedAddr(), AppAddr() - UnusedAddr()); - InitializeInterceptors(); + initialize_interceptors(); // Register the fini callback to run when the program terminates successfully // or it is killed by the runtime. @@ -1008,14 +1062,27 @@ // Set up threads DFsanTSDInit(DFsanTSDDtor); + + dfsan_allocator_init(); + DFsanThread *main_thread = DFsanThread::Create(nullptr, nullptr, nullptr); SetCurrentThread(main_thread); main_thread->ThreadStart(); __dfsan_label_info[kInitializingLabel].desc = ""; + + dfsan_init_is_running = false; + dfsan_inited = 1; } +namespace __dfsan { + +void dfsan_init() { DFsanInit(0, nullptr, nullptr); } + +} // namespace __dfsan + #if SANITIZER_CAN_USE_PREINIT_ARRAY -__attribute__((section(".preinit_array"), used)) -static void (*dfsan_init_ptr)(int, char **, char **) = dfsan_init; +__attribute__((section(".preinit_array"), + used)) static void (*dfsan_init_ptr)(int, char **, + char **) = DFsanInit; #endif diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.h b/compiler-rt/lib/dfsan/dfsan_allocator.h new file mode 100644 --- /dev/null +++ b/compiler-rt/lib/dfsan/dfsan_allocator.h @@ -0,0 +1,30 @@ +//===-- dfsan_allocator.h ---------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of DataflowSanitizer. +// +//===----------------------------------------------------------------------===// + +#ifndef DFSAN_ALLOCATOR_H +#define DFSAN_ALLOCATOR_H + +#include "sanitizer_common/sanitizer_common.h" + +namespace __dfsan { + +struct DFsanThreadLocalMallocStorage { + ALIGNED(8) uptr allocator_cache[96 * (512 * 8 + 16)]; // Opaque. + void CommitBack(); + + private: + // These objects are allocated via mmap() and are zero-initialized. + DFsanThreadLocalMallocStorage() {} +}; + +} // namespace __dfsan +#endif // DFSAN_ALLOCATOR_H diff --git a/compiler-rt/lib/dfsan/dfsan_allocator.cpp b/compiler-rt/lib/dfsan/dfsan_allocator.cpp new file mode 100644 --- /dev/null +++ b/compiler-rt/lib/dfsan/dfsan_allocator.cpp @@ -0,0 +1,293 @@ +//===-- dfsan_allocator.cpp -------------------------- --------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of DataflowSanitizer. +// +// DataflowSanitizer allocator. 
+//===----------------------------------------------------------------------===// + +#include "dfsan_allocator.h" + +#include "dfsan.h" +#include "dfsan_thread.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_checks.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_report.h" +#include "sanitizer_common/sanitizer_errno.h" + +namespace __dfsan { + +struct Metadata { + uptr requested_size; +}; + +struct DFsanMapUnmapCallback { + void OnMap(uptr p, uptr size) const { dfsan_set_label(0, (void *)p, size); } + void OnUnmap(uptr p, uptr size) const { + __dfsan::dfsan_release_meta_memory((const void *)p, size); + } +}; + +// TODO: DFSan assumes application memory starts from 0x700000008000. For +// unknown reason, the sanitizer allocator does not support any start address +// between 0x701000000000 and 0x700000008000. After switching to fast8labels +// mode, DFSan memory layout will be changed to the same to MSan's. Then we +// set the start address to 0x700000000000 as MSan. +static const uptr kAllocatorSpace = 0x701000000000ULL; +static const uptr kMaxAllowedMallocSize = 8UL << 30; + +struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = kAllocatorSpace; + static const uptr kSpaceSize = 0x40000000000; // 4T. 
+ static const uptr kMetadataSize = sizeof(Metadata); + typedef DefaultSizeClassMap SizeClassMap; + typedef DFsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + using AddressSpaceView = LocalAddressSpaceView; +}; + +typedef SizeClassAllocator64<AP64> PrimaryAllocator; + +typedef CombinedAllocator<PrimaryAllocator> Allocator; +typedef Allocator::AllocatorCache AllocatorCache; + +static Allocator allocator; +static AllocatorCache fallback_allocator_cache; +static StaticSpinMutex fallback_mutex; + +static uptr max_malloc_size; + +void dfsan_allocator_init() { + SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); + allocator.Init(common_flags()->allocator_release_to_os_interval_ms); + if (common_flags()->max_allocation_size_mb) + max_malloc_size = Min(common_flags()->max_allocation_size_mb << 20, + kMaxAllowedMallocSize); + else + max_malloc_size = kMaxAllowedMallocSize; +} + +AllocatorCache *GetAllocatorCache(DFsanThreadLocalMallocStorage *ms) { + CHECK(ms); + CHECK_LE(sizeof(AllocatorCache), sizeof(ms->allocator_cache)); + return reinterpret_cast<AllocatorCache *>(ms->allocator_cache); +} + +void DFsanThreadLocalMallocStorage::CommitBack() { + allocator.SwallowCache(GetAllocatorCache(this)); +} + +static void *DFsanAllocate(uptr size, uptr alignment, bool zeroise) { + if (size > max_malloc_size) { + if (AllocatorMayReturnNull()) { + Report("WARNING: DataflowSanitizer failed to allocate 0x%zx bytes\n", + size); + return nullptr; + } + BufferedStackTrace stack; + ReportAllocationSizeTooBig(size, max_malloc_size, &stack); + } + DFsanThread *t = GetCurrentThread(); + void *allocated; + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocated = allocator.Allocate(cache, size, alignment); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = allocator.Allocate(cache, size, alignment); + } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return
nullptr; + BufferedStackTrace stack; + ReportOutOfMemory(size, &stack); + } + Metadata *meta = + reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated)); + meta->requested_size = size; + if (zeroise) { + internal_memset(allocated, 0, size); + dfsan_set_label(0, allocated, size); + } else if (flags().zero_in_malloc) { + dfsan_set_label(0, allocated, size); + } + return allocated; +} + +void dfsan_deallocate(void *p) { + CHECK(p); + Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(p)); + uptr size = meta->requested_size; + meta->requested_size = 0; + if (flags().zero_in_free) + dfsan_set_label(0, p, size); + DFsanThread *t = GetCurrentThread(); + if (t) { + AllocatorCache *cache = GetAllocatorCache(&t->malloc_storage()); + allocator.Deallocate(cache, p); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocator.Deallocate(cache, p); + } +} + +void *DFsanReallocate(void *old_p, uptr new_size, uptr alignment) { + Metadata *meta = reinterpret_cast<Metadata *>(allocator.GetMetaData(old_p)); + uptr old_size = meta->requested_size; + uptr actually_allocated_size = allocator.GetActuallyAllocatedSize(old_p); + if (new_size <= actually_allocated_size) { + // We are not reallocating here.
+ meta->requested_size = new_size; + if (new_size > old_size && flags().zero_in_malloc) + dfsan_set_label(0, (char *)old_p + old_size, new_size - old_size); + return old_p; + } + uptr memcpy_size = Min(new_size, old_size); + void *new_p = DFsanAllocate(new_size, alignment, false /*zeroise*/); + if (new_p) { + dfsan_copy_memory(new_p, old_p, memcpy_size); + dfsan_deallocate(old_p); + } + return new_p; +} + +void *DFsanCalloc(uptr nmemb, uptr size) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + BufferedStackTrace stack; + ReportCallocOverflow(nmemb, size, &stack); + } + return DFsanAllocate(nmemb * size, sizeof(u64), true /*zeroise*/); +} + +static uptr AllocationSize(const void *p) { + if (!p) + return 0; + const void *beg = allocator.GetBlockBegin(p); + if (beg != p) + return 0; + Metadata *b = (Metadata *)allocator.GetMetaData(p); + return b->requested_size; +} + +void *dfsan_malloc(uptr size) { + return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/)); +} + +void *dfsan_calloc(uptr nmemb, uptr size) { + return SetErrnoOnNull(DFsanCalloc(nmemb, size)); +} + +void *dfsan_realloc(void *ptr, uptr size) { + if (!ptr) + return SetErrnoOnNull(DFsanAllocate(size, sizeof(u64), false /*zeroise*/)); + if (size == 0) { + dfsan_deallocate(ptr); + return nullptr; + } + return SetErrnoOnNull(DFsanReallocate(ptr, size, sizeof(u64))); +} + +void *dfsan_reallocarray(void *ptr, uptr nmemb, uptr size) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + BufferedStackTrace stack; + ReportReallocArrayOverflow(nmemb, size, &stack); + } + return dfsan_realloc(ptr, nmemb * size); +} + +void *dfsan_valloc(uptr size) { + return SetErrnoOnNull( + DFsanAllocate(size, GetPageSizeCached(), false /*zeroise*/)); +} + +void *dfsan_pvalloc(uptr size) { + uptr PageSize = GetPageSizeCached(); + if (UNLIKELY(CheckForPvallocOverflow(size, 
PageSize))) { + errno = errno_ENOMEM; + if (AllocatorMayReturnNull()) + return nullptr; + BufferedStackTrace stack; + ReportPvallocOverflow(size, &stack); + } + // pvalloc(0) should allocate one page. + size = size ? RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull(DFsanAllocate(size, PageSize, false /*zeroise*/)); +} + +void *dfsan_aligned_alloc(uptr alignment, uptr size) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + BufferedStackTrace stack; + ReportInvalidAlignedAllocAlignment(size, alignment, &stack); + } + return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/)); +} + +void *dfsan_memalign(uptr alignment, uptr size) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + BufferedStackTrace stack; + ReportInvalidAllocationAlignment(alignment, &stack); + } + return SetErrnoOnNull(DFsanAllocate(size, alignment, false /*zeroise*/)); +} + +int dfsan_posix_memalign(void **memptr, uptr alignment, uptr size) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + BufferedStackTrace stack; + ReportInvalidPosixMemalignAlignment(alignment, &stack); + } + void *ptr = DFsanAllocate(size, alignment, false /*zeroise*/); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by DFsanAllocate. 
+ return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +} // namespace __dfsan + +using namespace __dfsan; + +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatMapped]; +} + +uptr __sanitizer_get_free_bytes() { return 1; } + +uptr __sanitizer_get_unmapped_bytes() { return 1; } + +uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } + +int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; } + +uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); } diff --git a/compiler-rt/lib/dfsan/dfsan_flags.inc b/compiler-rt/lib/dfsan/dfsan_flags.inc --- a/compiler-rt/lib/dfsan/dfsan_flags.inc +++ b/compiler-rt/lib/dfsan/dfsan_flags.inc @@ -40,3 +40,7 @@ "The depth limit of origin tracking stack traces.") DFSAN_FLAG(bool, check_origin_invariant, false, "Whether to check if the origin invariant holds.") +DFSAN_FLAG(bool, zero_in_malloc, true, + "Whether to zero shadow space of new allocated memory.") +DFSAN_FLAG(bool, zero_in_free, true, + "Whether to zero shadow space of deallocated memory.")