Index: lib/lsan/lsan_common_linux.cc
===================================================================
--- lib/lsan/lsan_common_linux.cc
+++ lib/lsan/lsan_common_linux.cc
@@ -90,26 +90,34 @@
   dl_iterate_phdr(ProcessGlobalRegionsCallback, frontier);
 }
 
-static uptr GetCallerPC(u32 stack_id) {
+static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
   CHECK(stack_id);
   uptr size = 0;
-  const uptr *trace = StackDepotGet(stack_id, &size);
+  const uptr *trace = map->StackDepotGet(stack_id, &size);
   // The top frame is our malloc/calloc/etc. The next frame is the caller.
   if (size >= 2)
     return trace[1];
   return 0;
 }
 
+struct ProcessPlatformAllocParam {
+  Frontier *frontier;
+  StackDepotReverseMap *stack_depot_reverse_map;
+};
+
 // ForEachChunk callback. Identifies unreachable chunks which must be treated as
 // reachable. Marks them as reachable and adds them to the frontier.
 static void ProcessPlatformSpecificAllocationsCb(uptr chunk, void *arg) {
   CHECK(arg);
+  ProcessPlatformAllocParam *param =
+      reinterpret_cast<ProcessPlatformAllocParam *>(arg);
   chunk = GetUserBegin(chunk);
   LsanMetadata m(chunk);
   if (m.allocated() && m.tag() != kReachable) {
-    if (linker->containsAddress(GetCallerPC(m.stack_trace_id()))) {
+    if (linker->containsAddress(
+            GetCallerPC(m.stack_trace_id(), param->stack_depot_reverse_map))) {
       m.set_tag(kReachable);
-      reinterpret_cast<Frontier *>(arg)->push_back(chunk);
+      param->frontier->push_back(chunk);
     }
   }
 }
@@ -119,7 +127,9 @@
 void ProcessPlatformSpecificAllocations(Frontier *frontier) {
   if (!flags()->use_tls) return;
   if (!linker) return;
-  ForEachChunk(ProcessPlatformSpecificAllocationsCb, frontier);
+  StackDepotReverseMap stack_depot_reverse_map;
+  ProcessPlatformAllocParam arg = {frontier, &stack_depot_reverse_map};
+  ForEachChunk(ProcessPlatformSpecificAllocationsCb, &arg);
 }
 
 }  // namespace __lsan
Index: lib/sanitizer_common/sanitizer_stackdepot.h
===================================================================
--- lib/sanitizer_common/sanitizer_stackdepot.h
+++ lib/sanitizer_common/sanitizer_stackdepot.h
@@ -13,6 +13,7 @@
 #ifndef SANITIZER_STACKDEPOT_H
 #define SANITIZER_STACKDEPOT_H
 
+#include "sanitizer_common.h"
 #include "sanitizer_internal_defs.h"
 
 namespace __sanitizer {
@@ -31,6 +32,32 @@
 
 StackDepotStats *StackDepotGetStats();
 
+struct StackDesc;
+
+// Instantiating this class creates a snapshot of StackDepot which can be
+// efficiently queried with StackDepotGet(). You can use it concurrently with
+// StackDepot, but the snapshot is only guaranteed to contain those stack
+// traces which were stored before it was instantiated.
+class StackDepotReverseMap {
+ public:
+  StackDepotReverseMap();
+  const uptr *StackDepotGet(u32 id, uptr *size);
+
+ private:
+  struct IdDescPair {
+    u32 id;
+    StackDesc *desc;
+  };
+
+  InternalMmapVector<IdDescPair> map_;
+
+  // Disallow evil constructors.
+  StackDepotReverseMap(const StackDepotReverseMap&);
+  void operator=(const StackDepotReverseMap&);
+
+  friend bool IdDescPairComparator(const IdDescPair &a, const IdDescPair &b);
+};
+
 }  // namespace __sanitizer
 
 #endif  // SANITIZER_STACKDEPOT_H
Index: lib/sanitizer_common/sanitizer_stackdepot.cc
===================================================================
--- lib/sanitizer_common/sanitizer_stackdepot.cc
+++ lib/sanitizer_common/sanitizer_stackdepot.cc
@@ -201,4 +201,46 @@
   return 0;
 }
 
+bool IdDescPairComparator(const StackDepotReverseMap::IdDescPair &a,
+                          const StackDepotReverseMap::IdDescPair &b) {
+  return a.id < b.id;
+}
+
+StackDepotReverseMap::StackDepotReverseMap()
+    : map_(StackDepotGetStats()->n_uniq_ids + 100) {
+  for (uptr idx = 0; idx < kTabSize; idx++) {
+    atomic_uintptr_t *p = &depot.tab[idx];
+    uptr v = atomic_load(p, memory_order_consume);
+    StackDesc *s = (StackDesc*)(v & ~1);  // Strip the lock bit.
+    for (; s; s = s->link) {
+      IdDescPair pair = {s->id, s};
+      map_.push_back(pair);
+    }
+  }
+  InternalSort(&map_, map_.size(), IdDescPairComparator);
+}
+
+const uptr *StackDepotReverseMap::StackDepotGet(u32 id, uptr *size) {
+  if (!map_.size()) return 0;
+  // Binary search.
+  uptr i_min = 0;
+  uptr i_max = map_.size() - 1;
+  while (i_max >= i_min) {
+    uptr i_mid = (i_min + i_max) / 2;
+    if (map_[i_mid].id < id) {
+      i_min = i_mid + 1;
+    } else if (map_[i_mid].id > id) {
+      // i_max is unsigned; guard against wrapping below zero.
+      if (i_mid == 0) break;
+      i_max = i_mid - 1;
+    } else {
+      StackDesc *desc = map_[i_mid].desc;
+      *size = desc->size;
+      return desc->stack;
+    }
+  }
+  *size = 0;
+  return 0;
+}
+
 }  // namespace __sanitizer
Index: lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc
===================================================================
--- lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc
+++ lib/sanitizer_common/tests/sanitizer_stackdepot_test.cc
@@ -66,4 +66,27 @@
   EXPECT_NE(i1, i2);
 }
 
+TEST(SanitizerCommon, StackDepotReverseMap) {
+  uptr s1[] = {1, 2, 3, 4, 5};
+  uptr s2[] = {7, 1, 3, 0};
+  uptr s3[] = {10, 2, 5, 3};
+  uptr s4[] = {1, 3, 2, 5};
+  u32 ids[4] = {0};
+  ids[0] = StackDepotPut(s1, ARRAY_SIZE(s1));
+  ids[1] = StackDepotPut(s2, ARRAY_SIZE(s2));
+  ids[2] = StackDepotPut(s3, ARRAY_SIZE(s3));
+  ids[3] = StackDepotPut(s4, ARRAY_SIZE(s4));
+
+  StackDepotReverseMap map;
+
+  for (uptr i = 0; i < 4; i++) {
+    uptr sz_depot, sz_map;
+    const uptr *sp_depot, *sp_map;
+    sp_depot = StackDepotGet(ids[i], &sz_depot);
+    sp_map = map.StackDepotGet(ids[i], &sz_map);
+    EXPECT_EQ(sz_depot, sz_map);
+    EXPECT_EQ(sp_depot, sp_map);
+  }
+}
+
 }  // namespace __sanitizer
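
For reviewers who want the underlying idea without the sanitizer plumbing, here is a minimal standalone sketch of the pattern the patch implements: snapshot the depot once, sort the (id, descriptor) pairs, and answer lookups by binary search. The names (Desc, ReverseMap, Get) and the use of standard containers are illustrative only, not part of the patch; the real code must use the runtime's allocator-free primitives (InternalMmapVector, InternalSort) because it runs inside the sanitizer itself.

// Standalone sketch (illustrative names, standard containers) of the
// snapshot-sort-search pattern used by StackDepotReverseMap.
#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

struct Desc {
  uint32_t id;                    // Id handed out when the trace was stored.
  std::vector<uintptr_t> frames;  // The stored stack trace.
};

class ReverseMap {
 public:
  // Snapshot the depot contents once and sort by id, so each later
  // lookup costs O(log n) instead of a walk over the depot's table.
  explicit ReverseMap(std::vector<Desc> snapshot) : map_(std::move(snapshot)) {
    std::sort(map_.begin(), map_.end(),
              [](const Desc &a, const Desc &b) { return a.id < b.id; });
  }

  // Returns the frames for |id|, or nullptr if the id is unknown
  // (e.g. stored after the snapshot was taken).
  const std::vector<uintptr_t> *Get(uint32_t id) const {
    auto it = std::lower_bound(
        map_.begin(), map_.end(), id,
        [](const Desc &d, uint32_t key) { return d.id < key; });
    if (it == map_.end() || it->id != id) return nullptr;
    return &it->frames;
  }

 private:
  std::vector<Desc> map_;  // Sorted by id after construction.
};

As in the lsan_common_linux.cc change above, the point is that the snapshot is built once per leak-checking pass and then queried once per chunk, so the one-time construction and sort cost is amortized across all lookups.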