Index: compiler-rt/trunk/include/sanitizer/lsan_interface.h
===================================================================
--- compiler-rt/trunk/include/sanitizer/lsan_interface.h
+++ compiler-rt/trunk/include/sanitizer/lsan_interface.h
@@ -41,14 +41,25 @@
   void __lsan_register_root_region(const void *p, size_t size);
   void __lsan_unregister_root_region(const void *p, size_t size);
 
-  // Calling this function makes LSan enter the leak checking phase immediately.
-  // Use this if normal end-of-process leak checking happens too late (e.g. if
-  // you have intentional memory leaks in your shutdown code). Calling this
-  // function overrides end-of-process leak checking; it must be called at
-  // most once per process. This function will terminate the process if there
-  // are memory leaks and the exit_code flag is non-zero.
+  // Check for leaks now. This function behaves identically to the default
+  // end-of-process leak check. In particular, it will terminate the process if
+  // leaks are found and the exit_code flag is non-zero.
+  // Subsequent calls to this function will have no effect and end-of-process
+  // leak check will not run. Effectively, end-of-process leak check is moved to
+  // the time of first invocation of this function.
+  // By calling this function early during process shutdown, you can instruct
+  // LSan to ignore shutdown-only leaks which happen later on.
   void __lsan_do_leak_check();
 
+  // Check for leaks now. Returns zero if no leaks have been found or if leak
+  // detection is disabled, non-zero otherwise.
+  // This function may be called repeatedly, e.g. to periodically check a
+  // long-running process. It prints a leak report if appropriate, but does not
+  // terminate the process. It does not affect the behavior of
+  // __lsan_do_leak_check() or the end-of-process leak check, and is not
+  // affected by them.
+  int __lsan_do_recoverable_leak_check();
+
   // The user may optionally provide this function to disallow leak checking
   // for the program it is linked into (if the return value is non-zero). This
   // function must be defined as returning a constant value; any behavior beyond
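
A minimal usage sketch of the interface above (not part of the patch), assuming a program built with standalone LSan, e.g. clang++ -fsanitize=leak; the loop structure and the deliberately leaked allocations are illustrative only:

#include <sanitizer/lsan_interface.h>
#include <stdio.h>
#include <stdlib.h>

int main() {
  for (int i = 0; i < 3; ++i) {
    malloc(100);  // deliberately leaked for illustration
    // On-demand check: prints a report and returns non-zero when leaks
    // are found, but never terminates the process.
    if (__lsan_do_recoverable_leak_check())
      fprintf(stderr, "leaks detected after iteration %d\n", i);
  }
  // Terminating check: behaves like the end-of-process check, so with
  // leaks present and a non-zero exit_code flag it does not return.
  __lsan_do_leak_check();
  return 0;
}
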
Index: compiler-rt/trunk/lib/lsan/lsan_common.cc
===================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common.cc
+++ compiler-rt/trunk/lib/lsan/lsan_common.cc
@@ -126,13 +126,14 @@
 // Scans the memory range, looking for byte patterns that point into allocator
 // chunks. Marks those chunks with |tag| and adds them to |frontier|.
-// There are two usage modes for this function: finding reachable or ignored
-// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
+// There are two usage modes for this function: finding reachable chunks
+// (|tag| = kReachable) and finding indirectly leaked chunks
 // (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
 // so |frontier| = 0.
 void ScanRangeForPointers(uptr begin, uptr end,
                           Frontier *frontier,
                           const char *region_type, ChunkTag tag) {
+  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
   const uptr alignment = flags()->pointer_alignment();
   LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
   uptr pp = begin;
@@ -146,9 +147,7 @@
     // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
     if (chunk == begin) continue;
     LsanMetadata m(chunk);
-    // Reachable beats ignored beats leaked.
-    if (m.tag() == kReachable) continue;
-    if (m.tag() == kIgnored && tag != kReachable) continue;
+    if (m.tag() == kReachable || m.tag() == kIgnored) continue;
 
     // Do this check relatively late so we can log only the interesting cases.
     if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
@@ -287,7 +286,7 @@
   LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
     ScanRangeForPointers(chunk, chunk + m.requested_size(),
-                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
+                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
   }
 }
@@ -297,8 +296,11 @@
   CHECK(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
-  if (m.allocated() && m.tag() == kIgnored)
+  if (m.allocated() && m.tag() == kIgnored) {
+    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
+                 chunk, chunk + m.requested_size(), m.requested_size());
     reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+  }
 }
 
 // Sets the appropriate tag on each chunk.
@@ -306,26 +308,33 @@
   // Holds the flood fill frontier.
   Frontier frontier(1);
 
+  ForEachChunk(CollectIgnoredCb, &frontier);
   ProcessGlobalRegions(&frontier);
   ProcessThreads(suspended_threads, &frontier);
   ProcessRootRegions(&frontier);
   FloodFillTag(&frontier, kReachable);
+
   // The check here is relatively expensive, so we do this in a separate flood
   // fill. That way we can skip the check for chunks that are reachable
   // otherwise.
   LOG_POINTERS("Processing platform-specific allocations.\n");
+  CHECK_EQ(0, frontier.size());
   ProcessPlatformSpecificAllocations(&frontier);
   FloodFillTag(&frontier, kReachable);
 
-  LOG_POINTERS("Scanning ignored chunks.\n");
-  CHECK_EQ(0, frontier.size());
-  ForEachChunk(CollectIgnoredCb, &frontier);
-  FloodFillTag(&frontier, kIgnored);
-
   // Iterate over leaked chunks and mark those that are reachable from other
   // leaked chunks.
   LOG_POINTERS("Scanning leaked chunks.\n");
-  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
+  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
+}
+
+// ForEachChunk callback. Resets the tags to pre-leak-check state.
+static void ResetTagsCb(uptr chunk, void *arg) {
+  (void)arg;
+  chunk = GetUserBegin(chunk);
+  LsanMetadata m(chunk);
+  if (m.allocated() && m.tag() != kIgnored)
+    m.set_tag(kDirectlyLeaked);
 }
 
 static void PrintStackTraceById(u32 stack_trace_id) {
@@ -371,35 +380,33 @@
   Printf("%s\n\n", line);
 }
 
-struct DoLeakCheckParam {
+struct CheckForLeaksParam {
   bool success;
   LeakReport leak_report;
 };
 
-static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
-                                void *arg) {
-  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
+static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
+                                  void *arg) {
+  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
   CHECK(param);
   CHECK(!param->success);
   ClassifyAllChunks(suspended_threads);
   ForEachChunk(CollectLeaksCb, &param->leak_report);
+  // Clean up for subsequent leak checks. This assumes we did not overwrite any
+  // kIgnored tags.
+  ForEachChunk(ResetTagsCb, nullptr);
   param->success = true;
 }
 
-void DoLeakCheck() {
-  EnsureMainThreadIDIsCorrect();
-  BlockingMutexLock l(&global_mutex);
-  static bool already_done;
-  if (already_done) return;
-  already_done = true;
+static bool CheckForLeaks() {
   if (&__lsan_is_turned_off && __lsan_is_turned_off())
-    return;
-
-  DoLeakCheckParam param;
+    return false;
+  EnsureMainThreadIDIsCorrect();
+  CheckForLeaksParam param;
   param.success = false;
   LockThreadRegistry();
   LockAllocator();
-  DoStopTheWorld(DoLeakCheckCallback, &param);
+  DoStopTheWorld(CheckForLeaksCallback, &param);
   UnlockAllocator();
   UnlockThreadRegistry();
@@ -423,14 +430,33 @@
   PrintMatchedSuppressions();
   if (unsuppressed_count > 0) {
     param.leak_report.PrintSummary();
-    if (flags()->exitcode) {
-      if (common_flags()->coverage)
-        __sanitizer_cov_dump();
-      internal__exit(flags()->exitcode);
-    }
+    return true;
+  }
+  return false;
+}
+
+void DoLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  static bool already_done;
+  if (already_done) return;
+  already_done = true;
+  bool have_leaks = CheckForLeaks();
+  if (!have_leaks) {
+    return;
+  }
+  if (flags()->exitcode) {
+    if (common_flags()->coverage)
+      __sanitizer_cov_dump();
+    internal__exit(flags()->exitcode);
+  }
 }
 
+static int DoRecoverableLeakCheck() {
+  BlockingMutexLock l(&global_mutex);
+  bool have_leaks = CheckForLeaks();
+  return have_leaks ? 1 : 0;
+}
+
 static Suppression *GetSuppressionForAddr(uptr addr) {
   Suppression *s = nullptr;
@@ -676,6 +702,14 @@
 #endif  // CAN_SANITIZE_LEAKS
 }
 
+int __lsan_do_recoverable_leak_check() {
+#if CAN_SANITIZE_LEAKS
+  if (common_flags()->detect_leaks)
+    return __lsan::DoRecoverableLeakCheck();
+#endif  // CAN_SANITIZE_LEAKS
+  return 0;
+}
+
 #if !SANITIZER_SUPPORTS_WEAK_HOOKS
 SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
 int __lsan_is_turned_off() {
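
Note how ResetTagsCb above resets every tag except kIgnored, so objects excluded via the existing __lsan_ignore_object() interface stay excluded across repeated recoverable checks. A sketch of that interaction (not part of the patch), again assuming standalone LSan:

#include <assert.h>
#include <sanitizer/lsan_interface.h>
#include <stdlib.h>

int main() {
  void *p = malloc(64);
  __lsan_ignore_object(p);  // tags the chunk kIgnored
  p = 0;                    // drop the reference
  // The ignored chunk is never reported, neither on the first check nor
  // on later ones: between checks, tags are reset to kDirectlyLeaked for
  // all chunks except kIgnored ones.
  assert(__lsan_do_recoverable_leak_check() == 0);
  assert(__lsan_do_recoverable_leak_check() == 0);
  return 0;
}
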
Index: compiler-rt/trunk/lib/lsan/lsan_common_linux.cc
===================================================================
--- compiler-rt/trunk/lib/lsan/lsan_common_linux.cc
+++ compiler-rt/trunk/lib/lsan/lsan_common_linux.cc
@@ -110,7 +110,7 @@
       reinterpret_cast<ProcessPlatformAllocParam *>(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
-  if (m.allocated() && m.tag() != kReachable) {
+  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
     u32 stack_id = m.stack_trace_id();
     uptr caller_pc = 0;
     if (stack_id > 0)
Index: compiler-rt/trunk/test/lsan/TestCases/recoverable_leak_check.cc
===================================================================
--- compiler-rt/trunk/test/lsan/TestCases/recoverable_leak_check.cc
+++ compiler-rt/trunk/test/lsan/TestCases/recoverable_leak_check.cc
@@ -0,0 +1,32 @@
+// Test for on-demand leak checking.
+// RUN: LSAN_BASE="use_stacks=0:use_registers=0"
+// RUN: %clangxx_lsan %s -o %t
+// RUN: LSAN_OPTIONS=$LSAN_BASE %run %t foo 2>&1 | FileCheck %s
+// RUN: LSAN_OPTIONS=$LSAN_BASE %run %t 2>&1 | FileCheck %s
+
+#include <assert.h>
+#include <sanitizer/lsan_interface.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+
+void *p;
+
+int main(int argc, char *argv[]) {
+  p = malloc(23);
+
+  assert(__lsan_do_recoverable_leak_check() == 0);
+
+  fprintf(stderr, "Test alloc: %p.\n", malloc(1337));
+// CHECK: Test alloc:
+
+  assert(__lsan_do_recoverable_leak_check() == 1);
+// CHECK: SUMMARY: {{(Leak|Address)}}Sanitizer: 1337 byte
+
+  // Test that we correctly reset chunk tags.
+  p = 0;
+  assert(__lsan_do_recoverable_leak_check() == 1);
+// CHECK: SUMMARY: {{(Leak|Address)}}Sanitizer: 1360 byte
+
+  _exit(0);
+}
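
Per the new header comment, recoverable checks neither trigger nor suppress the end-of-process check. A sketch (not part of the patch) exercising that independence, assuming default LSAN_OPTIONS (leak detection enabled, non-zero exitcode):

#include <sanitizer/lsan_interface.h>
#include <stdlib.h>

int main() {
  malloc(1337);  // deliberately leaked
  // Prints a report and returns non-zero, but does not terminate.
  (void)__lsan_do_recoverable_leak_check();
  // The end-of-process check still runs after main() returns, reports the
  // same leak again, and exits with the value of the exitcode flag.
  return 0;
}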