diff --git a/compiler-rt/lib/lsan/lsan_common.h b/compiler-rt/lib/lsan/lsan_common.h
--- a/compiler-rt/lib/lsan/lsan_common.h
+++ b/compiler-rt/lib/lsan/lsan_common.h
@@ -82,6 +82,15 @@
 inline Flags *flags() { return &lsan_flags; }
 void RegisterLsanFlags(FlagParser *parser, Flags *f);
 
+struct LeakedChunk {
+  uptr chunk;
+  u32 stack_trace_id;
+  uptr leaked_size;
+  ChunkTag tag;
+};
+
+using LeakedChunks = InternalMmapVector<LeakedChunk>;
+
 struct Leak {
   u32 id;
   uptr hit_count;
@@ -101,8 +110,7 @@
 class LeakReport {
  public:
   LeakReport() {}
-  void AddLeakedChunk(uptr chunk, u32 stack_trace_id, uptr leaked_size,
-                      ChunkTag tag);
+  void AddLeakedChunks(const LeakedChunks &chunks);
   void ReportTopLeaks(uptr max_leaks);
   void PrintSummary();
   uptr ApplySuppressions();
@@ -136,7 +144,7 @@
 // threads and enumerating roots.
 struct CheckForLeaksParam {
   Frontier frontier;
-  LeakReport leak_report;
+  LeakedChunks leaks;
   bool success = false;
 };
 
diff --git a/compiler-rt/lib/lsan/lsan_common.cpp b/compiler-rt/lib/lsan/lsan_common.cpp
--- a/compiler-rt/lib/lsan/lsan_common.cpp
+++ b/compiler-rt/lib/lsan/lsan_common.cpp
@@ -639,15 +639,13 @@
 // a LeakReport.
 static void CollectLeaksCb(uptr chunk, void *arg) {
   CHECK(arg);
-  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
+  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (!m.allocated())
     return;
-  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
-    leak_report->AddLeakedChunk(chunk, m.stack_trace_id(), m.requested_size(),
-                                m.tag());
-  }
+  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
+    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
 }
 
 void LeakSuppressionContext::PrintMatchedSuppressions() {
@@ -708,7 +706,7 @@
   CHECK(!param->success);
   ReportUnsuspendedThreads(suspended_threads);
   ClassifyAllChunks(suspended_threads, &param->frontier);
-  ForEachChunk(CollectLeaksCb, &param->leak_report);
+  ForEachChunk(CollectLeaksCb, &param->leaks);
   // Clean up for subsequent leak checks. This assumes we did not overwrite any
   // kIgnored tags.
   ForEachChunk(ResetTagsCb, nullptr);
@@ -757,17 +755,20 @@
           "etc)\n");
       Die();
     }
+    LeakReport leak_report;
+    leak_report.AddLeakedChunks(param.leaks);
+
     // No new suppressions stacks, so rerun will not help and we can report.
-    if (!param.leak_report.ApplySuppressions())
-      return PrintResults(param.leak_report);
+    if (!leak_report.ApplySuppressions())
+      return PrintResults(leak_report);
 
     // No indirect leaks to report, so we are done here.
-    if (!param.leak_report.IndirectUnsuppressedLeakCount())
-      return PrintResults(param.leak_report);
+    if (!leak_report.IndirectUnsuppressedLeakCount())
+      return PrintResults(leak_report);
 
     if (i >= 8) {
       Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
-      return PrintResults(param.leak_report);
+      return PrintResults(leak_report);
     }
 
     // We found a new previously unseen suppressed call stack. Rerun to make
@@ -804,40 +805,45 @@
 // A hard limit on the number of distinct leaks, to avoid quadratic complexity
 // in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
 // in real-world applications.
-// FIXME: Get rid of this limit by changing the implementation of LeakReport to
-// use a hash table.
+// FIXME: Get rid of this limit by moving logic into DedupLeaks.
 const uptr kMaxLeaksConsidered = 5000;
 
-void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
-                                uptr leaked_size, ChunkTag tag) {
-  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
-
-  if (u32 resolution = flags()->resolution) {
-    StackTrace stack = StackDepotGet(stack_trace_id);
-    stack.size = Min(stack.size, resolution);
-    stack_trace_id = StackDepotPut(stack);
-  }
+void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
+  for (const LeakedChunk &leak : chunks) {
+    uptr chunk = leak.chunk;
+    u32 stack_trace_id = leak.stack_trace_id;
+    uptr leaked_size = leak.leaked_size;
+    ChunkTag tag = leak.tag;
+    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
+
+    if (u32 resolution = flags()->resolution) {
+      StackTrace stack = StackDepotGet(stack_trace_id);
+      stack.size = Min(stack.size, resolution);
+      stack_trace_id = StackDepotPut(stack);
+    }
 
-  bool is_directly_leaked = (tag == kDirectlyLeaked);
-  uptr i;
-  for (i = 0; i < leaks_.size(); i++) {
-    if (leaks_[i].stack_trace_id == stack_trace_id &&
-        leaks_[i].is_directly_leaked == is_directly_leaked) {
-      leaks_[i].hit_count++;
-      leaks_[i].total_size += leaked_size;
-      break;
+    bool is_directly_leaked = (tag == kDirectlyLeaked);
+    uptr i;
+    for (i = 0; i < leaks_.size(); i++) {
+      if (leaks_[i].stack_trace_id == stack_trace_id &&
+          leaks_[i].is_directly_leaked == is_directly_leaked) {
+        leaks_[i].hit_count++;
+        leaks_[i].total_size += leaked_size;
+        break;
+      }
+    }
+    if (i == leaks_.size()) {
+      if (leaks_.size() == kMaxLeaksConsidered)
+        return;
+      Leak leak = {next_id_++, /* hit_count */ 1,
+                   leaked_size, stack_trace_id,
+                   is_directly_leaked, /* is_suppressed */ false};
+      leaks_.push_back(leak);
+    }
+    if (flags()->report_objects) {
+      LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
+      leaked_objects_.push_back(obj);
    }
-  }
-  if (i == leaks_.size()) {
-    if (leaks_.size() == kMaxLeaksConsidered)
-      return;
-    Leak leak = {next_id_++, /* hit_count */ 1, leaked_size,
-                 stack_trace_id, is_directly_leaked, /* is_suppressed */ false};
-    leaks_.push_back(leak);
-  }
-  if (flags()->report_objects) {
-    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
-    leaked_objects_.push_back(obj);
   }
 }
 
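The patch splits leak checking into two phases: CollectLeaksCb, which runs while threads are suspended, now only appends plain LeakedChunk records, and all stack-depot truncation and deduplication is deferred to LeakReport::AddLeakedChunks(). That deferral is what lets CheckForLeaks() build a fresh LeakReport from param.leaks on each suppression rerun instead of carrying one inside CheckForLeaksParam. Below is a minimal standalone sketch of the collect-then-aggregate shape, using plain STL containers in place of InternalMmapVector and a hash-map dedup that gestures toward the FIXME above; the key packing, the map, and the trimmed-down Leak fields are illustrative assumptions, not LSan code.

// Standalone sketch; mirrors the patch's names but is NOT the LSan
// implementation (plain STL instead of InternalMmapVector, simplified Leak).
#include <cstddef>
#include <cstdint>
#include <unordered_map>
#include <vector>

enum ChunkTag { kDirectlyLeaked, kIndirectlyLeaked };

// Plain record appended by the enumeration callback; no deduplication or
// stack-depot work happens while threads are suspended.
struct LeakedChunk {
  uintptr_t chunk;
  uint32_t stack_trace_id;
  size_t leaked_size;
  ChunkTag tag;
};

struct Leak {
  uint32_t id;
  size_t hit_count;
  size_t total_size;
};

class LeakReport {
 public:
  // Runs after the stop-the-world phase; folds the raw records into one Leak
  // per (stack trace, directness) pair.
  void AddLeakedChunks(const std::vector<LeakedChunk> &chunks) {
    for (const LeakedChunk &lc : chunks) {
      // Assumed key scheme: stack id shifted left, low bit = directness.
      uint64_t key = (uint64_t{lc.stack_trace_id} << 1) |
                     (lc.tag == kDirectlyLeaked ? 1u : 0u);
      auto [it, inserted] = leaks_.try_emplace(key, Leak{next_id_, 0, 0});
      if (inserted) ++next_id_;
      ++it->second.hit_count;
      it->second.total_size += lc.leaked_size;
    }
  }

 private:
  std::unordered_map<uint64_t, Leak> leaks_;
  uint32_t next_id_ = 0;
};

The patch itself keeps the linear dedup scan, which is why the kMaxLeaksConsidered cap survives; the sketch only shows why an O(1) lookup per chunk would make that cap unnecessary.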