Index: compiler-rt/trunk/lib/asan/asan_allocator.h
===================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.h
+++ compiler-rt/trunk/lib/asan/asan_allocator.h
@@ -213,5 +213,7 @@
 void PrintInternalAllocatorStats();
 void AsanSoftRssLimitExceededCallback(bool exceeded);
 
+AsanAllocator &get_allocator();
+
 }  // namespace __asan
 #endif  // ASAN_ALLOCATOR_H
Index: compiler-rt/trunk/lib/asan/asan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/asan/asan_allocator.cc
+++ compiler-rt/trunk/lib/asan/asan_allocator.cc
@@ -47,8 +47,6 @@
   return res;
 }
 
-static AsanAllocator &get_allocator();
-
 // The memory chunk allocated from the underlying allocator looks like this:
 // L L L L L L H H U U U U U U R R
 //   L -- left redzone words (0 or more bytes)
@@ -719,7 +717,7 @@
 
 static Allocator instance(LINKER_INITIALIZED);
 
-static AsanAllocator &get_allocator() {
+AsanAllocator &get_allocator() {
   return instance.allocator;
 }
 
Index: compiler-rt/trunk/lib/asan/asan_interceptors.cc
===================================================================
--- compiler-rt/trunk/lib/asan/asan_interceptors.cc
+++ compiler-rt/trunk/lib/asan/asan_interceptors.cc
@@ -22,6 +22,7 @@
 #include "asan_stats.h"
 #include "asan_suppressions.h"
 #include "lsan/lsan_common.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_libc.h"
 
 #if SANITIZER_POSIX
@@ -705,11 +706,23 @@
 #endif  // ASAN_INTERCEPT___CXA_ATEXIT
 
 #if ASAN_INTERCEPT_FORK
+static void BeforeFork() {
+  get_allocator().ForceLock();
+  StackDepotLockAll();
+}
+
+static void AfterFork() {
+  StackDepotUnlockAll();
+  get_allocator().ForceUnlock();
+}
+
 INTERCEPTOR(int, fork, void) {
   ENSURE_ASAN_INITED();
+  BeforeFork();
   if (common_flags()->coverage) CovBeforeFork();
   int pid = REAL(fork)();
   if (common_flags()->coverage) CovAfterFork(pid);
+  AfterFork();
   return pid;
 }
 #endif  // ASAN_INTERCEPT_FORK
Index: compiler-rt/trunk/lib/lsan/lsan_interceptors.cc
===================================================================
--- compiler-rt/trunk/lib/lsan/lsan_interceptors.cc
+++ compiler-rt/trunk/lib/lsan/lsan_interceptors.cc
@@ -22,6 +22,7 @@
 #include "sanitizer_common/sanitizer_platform_interceptors.h"
 #include "sanitizer_common/sanitizer_platform_limits_posix.h"
 #include "sanitizer_common/sanitizer_posix.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_tls_get_addr.h"
 #include "lsan.h"
 #include "lsan_allocator.h"
@@ -97,6 +98,24 @@
 }
 #endif
 
+static void BeforeFork() {
+  LockAllocator();
+  StackDepotLockAll();
+}
+
+static void AfterFork() {
+  StackDepotUnlockAll();
+  UnlockAllocator();
+}
+
+INTERCEPTOR(int, fork, void) {
+  ENSURE_LSAN_INITED;
+  BeforeFork();
+  int pid = REAL(fork)();
+  AfterFork();
+  return pid;
+}
+
 #if SANITIZER_INTERCEPT_MEMALIGN
 INTERCEPTOR(void*, memalign, uptr alignment, uptr size) {
   ENSURE_LSAN_INITED;
@@ -336,6 +355,7 @@
   LSAN_MAYBE_INTERCEPT_MALLOPT;
   INTERCEPT_FUNCTION(pthread_create);
   INTERCEPT_FUNCTION(pthread_join);
+  INTERCEPT_FUNCTION(fork);
 
   if (pthread_key_create(&g_thread_finalize_key, &thread_finalize)) {
     Report("LeakSanitizer: failed to create thread key.\n");
Index: compiler-rt/trunk/lib/msan/msan_allocator.h
===================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.h
+++ compiler-rt/trunk/lib/msan/msan_allocator.h
@@ -15,9 +15,106 @@
 #define MSAN_ALLOCATOR_H
 
 #include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" namespace __msan { +struct Metadata { + uptr requested_size; +}; + +struct MsanMapUnmapCallback { + void OnMap(uptr p, uptr size) const {} + void OnUnmap(uptr p, uptr size) const { + __msan_unpoison((void *)p, size); + + // We are about to unmap a chunk of user memory. + // Mark the corresponding shadow memory as not needed. + uptr shadow_p = MEM_TO_SHADOW(p); + ReleaseMemoryPagesToOS(shadow_p, shadow_p + size); + if (__msan_get_track_origins()) { + uptr origin_p = MEM_TO_ORIGIN(p); + ReleaseMemoryPagesToOS(origin_p, origin_p + size); + } + } +}; + +#if defined(__mips64) + static const uptr kMaxAllowedMallocSize = 2UL << 30; + static const uptr kRegionSizeLog = 20; + static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog; + typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap; + + struct AP32 { + static const uptr kSpaceBeg = 0; + static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE; + static const uptr kMetadataSize = sizeof(Metadata); + typedef __sanitizer::CompactSizeClassMap SizeClassMap; + static const uptr kRegionSizeLog = __msan::kRegionSizeLog; + typedef __msan::ByteMap ByteMap; + typedef MsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + }; + typedef SizeClassAllocator32 PrimaryAllocator; +#elif defined(__x86_64__) +#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING) + static const uptr kAllocatorSpace = 0x700000000000ULL; +#else + static const uptr kAllocatorSpace = 0x600000000000ULL; +#endif + static const uptr kMaxAllowedMallocSize = 8UL << 30; + + struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = kAllocatorSpace; + static const uptr kSpaceSize = 0x40000000000; // 4T. + static const uptr kMetadataSize = sizeof(Metadata); + typedef DefaultSizeClassMap SizeClassMap; + typedef MsanMapUnmapCallback MapUnmapCallback; + static const uptr kFlags = 0; + }; + + typedef SizeClassAllocator64 PrimaryAllocator; + +#elif defined(__powerpc64__) + static const uptr kMaxAllowedMallocSize = 2UL << 30; // 2G + + struct AP64 { // Allocator64 parameters. Deliberately using a short name. + static const uptr kSpaceBeg = 0x300000000000; + static const uptr kSpaceSize = 0x020000000000; // 2T. 
+    static const uptr kMetadataSize = sizeof(Metadata);
+    typedef DefaultSizeClassMap SizeClassMap;
+    typedef MsanMapUnmapCallback MapUnmapCallback;
+    static const uptr kFlags = 0;
+  };
+
+  typedef SizeClassAllocator64<AP64> PrimaryAllocator;
+#elif defined(__aarch64__)
+  static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
+  static const uptr kRegionSizeLog = 20;
+  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+
+  struct AP32 {
+    static const uptr kSpaceBeg = 0;
+    static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
+    static const uptr kMetadataSize = sizeof(Metadata);
+    typedef __sanitizer::CompactSizeClassMap SizeClassMap;
+    static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
+    typedef __msan::ByteMap ByteMap;
+    typedef MsanMapUnmapCallback MapUnmapCallback;
+    static const uptr kFlags = 0;
+  };
+  typedef SizeClassAllocator32<AP32> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+                          SecondaryAllocator> Allocator;
+
+
+Allocator &get_allocator();
+
 struct MsanThreadLocalMallocStorage {
   uptr quarantine_cache[16];
   // Allocator cache contains atomic_uint64_t which must be 8-byte aligned.
Index: compiler-rt/trunk/lib/msan/msan_allocator.cc
===================================================================
--- compiler-rt/trunk/lib/msan/msan_allocator.cc
+++ compiler-rt/trunk/lib/msan/msan_allocator.cc
@@ -12,8 +12,6 @@
 // MemorySanitizer allocator.
 //===----------------------------------------------------------------------===//
 
-#include "sanitizer_common/sanitizer_allocator.h"
-#include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "msan.h"
 #include "msan_allocator.h"
 #include "msan_origin.h"
@@ -22,102 +20,12 @@
 
 namespace __msan {
 
-struct Metadata {
-  uptr requested_size;
-};
-
-struct MsanMapUnmapCallback {
-  void OnMap(uptr p, uptr size) const {}
-  void OnUnmap(uptr p, uptr size) const {
-    __msan_unpoison((void *)p, size);
-
-    // We are about to unmap a chunk of user memory.
-    // Mark the corresponding shadow memory as not needed.
-    uptr shadow_p = MEM_TO_SHADOW(p);
-    ReleaseMemoryPagesToOS(shadow_p, shadow_p + size);
-    if (__msan_get_track_origins()) {
-      uptr origin_p = MEM_TO_ORIGIN(p);
-      ReleaseMemoryPagesToOS(origin_p, origin_p + size);
-    }
-  }
-};
-
-#if defined(__mips64)
-  static const uptr kMaxAllowedMallocSize = 2UL << 30;
-  static const uptr kRegionSizeLog = 20;
-  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-
-  struct AP32 {
-    static const uptr kSpaceBeg = 0;
-    static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
-    static const uptr kMetadataSize = sizeof(Metadata);
-    typedef __sanitizer::CompactSizeClassMap SizeClassMap;
-    static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
-    typedef __msan::ByteMap ByteMap;
-    typedef MsanMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
-  };
-  typedef SizeClassAllocator32<AP32> PrimaryAllocator;
-#elif defined(__x86_64__)
-#if SANITIZER_LINUX && !defined(MSAN_LINUX_X86_64_OLD_MAPPING)
-  static const uptr kAllocatorSpace = 0x700000000000ULL;
-#else
-  static const uptr kAllocatorSpace = 0x600000000000ULL;
-#endif
-  static const uptr kMaxAllowedMallocSize = 8UL << 30;
-
-  struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
-    static const uptr kSpaceBeg = kAllocatorSpace;
-    static const uptr kSpaceSize = 0x40000000000;  // 4T.
-    static const uptr kMetadataSize = sizeof(Metadata);
-    typedef DefaultSizeClassMap SizeClassMap;
-    typedef MsanMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
-  };
-
-  typedef SizeClassAllocator64<AP64> PrimaryAllocator;
-
-#elif defined(__powerpc64__)
-  static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
-
-  struct AP64 {  // Allocator64 parameters. Deliberately using a short name.
-    static const uptr kSpaceBeg = 0x300000000000;
-    static const uptr kSpaceSize = 0x020000000000;  // 2T.
-    static const uptr kMetadataSize = sizeof(Metadata);
-    typedef DefaultSizeClassMap SizeClassMap;
-    typedef MsanMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
-  };
-
-  typedef SizeClassAllocator64<AP64> PrimaryAllocator;
-#elif defined(__aarch64__)
-  static const uptr kMaxAllowedMallocSize = 2UL << 30;  // 2G
-  static const uptr kRegionSizeLog = 20;
-  static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-  typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-
-  struct AP32 {
-    static const uptr kSpaceBeg = 0;
-    static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
-    static const uptr kMetadataSize = sizeof(Metadata);
-    typedef __sanitizer::CompactSizeClassMap SizeClassMap;
-    static const uptr kRegionSizeLog = __msan::kRegionSizeLog;
-    typedef __msan::ByteMap ByteMap;
-    typedef MsanMapUnmapCallback MapUnmapCallback;
-    static const uptr kFlags = 0;
-  };
-  typedef SizeClassAllocator32<AP32> PrimaryAllocator;
-#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<MsanMapUnmapCallback> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
-                          SecondaryAllocator> Allocator;
-
 static Allocator allocator;
 static AllocatorCache fallback_allocator_cache;
 static SpinMutex fallback_mutex;
 
+Allocator &get_allocator() { return allocator; }
+
 void MsanAllocatorInit() {
   allocator.Init(
       common_flags()->allocator_may_return_null,
Index: compiler-rt/trunk/lib/msan/msan_interceptors.cc
===================================================================
--- compiler-rt/trunk/lib/msan/msan_interceptors.cc
+++ compiler-rt/trunk/lib/msan/msan_interceptors.cc
@@ -1228,6 +1228,7 @@
 }
 
 static void BeforeFork() {
+  get_allocator().ForceLock();
   StackDepotLockAll();
   ChainedOriginDepotLockAll();
 }
@@ -1235,6 +1236,7 @@
 static void AfterFork() {
   ChainedOriginDepotUnlockAll();
   StackDepotUnlockAll();
+  get_allocator().ForceUnlock();
 }
 
 INTERCEPTOR(int, fork, void) {
Index: compiler-rt/trunk/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc
===================================================================
--- compiler-rt/trunk/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc
+++ compiler-rt/trunk/test/sanitizer_common/TestCases/Linux/allocator_fork_no_hang.cc
@@ -0,0 +1,118 @@
+// https://github.com/google/sanitizers/issues/774
+// Test that sanitizer allocator is fork-safe.
+// Run a number of threads that perform memory allocation/deallocation, then fork
+// and verify that malloc/free do not deadlock in the child process.
+
+// RUN: %clangxx -std=c++11 -O0 %s -o %t
+// RUN: ASAN_OPTIONS=detect_leaks=0 %run %t 2>&1 | FileCheck %s
+
+// Fun fact: if test output is redirected to a file (as opposed to
+// being piped directly to FileCheck), we may lose some "done"s due to
+// a kernel bug:
+// https://lkml.org/lkml/2014/2/17/324
+
+// UNSUPPORTED: tsan
+
+// Flaky on PPC64.
+// UNSUPPORTED: powerpc64-target-arch
+// UNSUPPORTED: powerpc64le-target-arch
+
+#include <assert.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <unistd.h>
+
+int done;
+
+void *worker(void *arg) {
+  while (true) {
+    void *p = malloc(4);
+    if (__atomic_load_n(&done, __ATOMIC_RELAXED))
+      return 0;
+  }
+  return 0;
+}
+
+// Run through malloc/free in the child process.
+// This can deadlock on allocator cache refilling.
+void child() {
+  for (int i = 0; i < 10000; ++i) {
+    void *p = malloc(4);
+  }
+  write(2, "done\n", 5);
+}
+
+void test() {
+  const int kThreads = 10;
+  pthread_t t[kThreads];
+  for (int i = 0; i < kThreads; ++i)
+    pthread_create(&t[i], NULL, worker, (void*)(long)i);
+  usleep(100000);
+  pid_t pid = fork();
+  if (pid) {
+    // parent
+    __atomic_store_n(&done, 1, __ATOMIC_RELAXED);
+    pid_t p;
+    while ((p = wait(NULL)) == -1) {  }
+  } else {
+    // child
+    child();
+  }
+}
+
+int main() {
+  const int kChildren = 30;
+  for (int i = 0; i < kChildren; ++i) {
+    pid_t pid = fork();
+    if (pid) {
+      // parent
+    } else {
+      test();
+      exit(0);
+    }
+  }
+
+  for (int i = 0; i < kChildren; ++i) {
+    pid_t p;
+    while ((p = wait(NULL)) == -1) {  }
+  }
+
+  return 0;
+}
+
+// Expect 30 (== kChildren) "done" messages.
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done
+// CHECK: done