Index: lsan.cc
===================================================================
--- lsan.cc
+++ lsan.cc
@@ -70,6 +70,11 @@
   if (lsan_inited)
     return;
   lsan_init_is_running = true;
+
+#if SANITIZER_ANDROID
+  EnsureTLSSlotInit();
+#endif // SANITIZER_ANDROID
+
   SanitizerToolName = "LeakSanitizer";
   CacheBinaryName();
   AvoidCVE_2016_2143();
@@ -89,6 +94,8 @@
   InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir);
 
+  AndroidLogInit();
+
   lsan_inited = true;
   lsan_init_is_running = false;
 }
Index: lsan_allocator.h
===================================================================
--- lsan_allocator.h
+++ lsan_allocator.h
@@ -18,8 +18,66 @@
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_internal_defs.h"
 
+#include "lsan_common.h"
+
+
+#if SANITIZER_ANDROID
+// Voodoo from tsan
+#if defined(__aarch64__)
+# define __get_tls() ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
+#elif defined(__x86_64__)
+# define __get_tls() ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
+#else
+#error unsupported architecture
+#endif
+
+static const int TLS_SLOT_TSAN = 8;
+
+#define LsanTLSSlotBigStruct ((struct LsanTLSSlotBig*)(__get_tls()[TLS_SLOT_TSAN]))
+#endif // SANITIZER_ANDROID
+
+
 namespace __lsan {
+
+struct ChunkMetadata {
+  u8 allocated : 8;  // Must be first.
+  ChunkTag tag : 2;
+  uptr requested_size : 54;
+  u32 stack_trace_id;
+};
+
+#if defined(__mips64) || defined(__aarch64__)
+static const uptr kMaxAllowedMallocSize = 4UL << 30;
+static const uptr kRegionSizeLog = 20;
+static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
+typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
+typedef CompactSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
+    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
+    PrimaryAllocator;
+#else
+static const uptr kMaxAllowedMallocSize = 8UL << 30;
+static const uptr kAllocatorSpace = 0x600000000000ULL;
+static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
+typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
+        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
+#endif
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef LargeMmapAllocator<> SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
+          SecondaryAllocator> Allocator;
+
+
+#if SANITIZER_ANDROID
+struct LsanTLSSlotBig {
+  int disable_counter;
+  u32 current_thread_tid;
+  AllocatorCache cache;
+};
+#endif // SANITIZER_ANDROID
+
+
 void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
                bool cleared);
 void Deallocate(void *p);
@@ -34,6 +92,8 @@
 void AllocatorThreadFinish();
 void InitializeAllocator();
 
+void EnsureTLSSlotInit(void);
+
 }  // namespace __lsan
 
 #endif  // LSAN_ALLOCATOR_H
Index: lsan_allocator.cc
===================================================================
--- lsan_allocator.cc
+++ lsan_allocator.cc
@@ -25,43 +25,50 @@
 
 namespace __lsan {
 
-struct ChunkMetadata {
-  u8 allocated : 8;  // Must be first.
-  ChunkTag tag : 2;
-  uptr requested_size : 54;
-  u32 stack_trace_id;
-};
-
-#if defined(__mips64) || defined(__aarch64__)
-static const uptr kMaxAllowedMallocSize = 4UL << 30;
-static const uptr kRegionSizeLog = 20;
-static const uptr kNumRegions = SANITIZER_MMAP_RANGE_SIZE >> kRegionSizeLog;
-typedef TwoLevelByteMap<(kNumRegions >> 12), 1 << 12> ByteMap;
-typedef CompactSizeClassMap SizeClassMap;
-typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE,
-    sizeof(ChunkMetadata), SizeClassMap, kRegionSizeLog, ByteMap>
-    PrimaryAllocator;
-#else
-static const uptr kMaxAllowedMallocSize = 8UL << 30;
-static const uptr kAllocatorSpace = 0x600000000000ULL;
-static const uptr kAllocatorSize = 0x40000000000ULL;  // 4T.
-typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize,
-        sizeof(ChunkMetadata), DefaultSizeClassMap> PrimaryAllocator;
-#endif
-typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
-typedef LargeMmapAllocator<> SecondaryAllocator;
-typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
-          SecondaryAllocator> Allocator;
 
 static Allocator allocator;
+#if !SANITIZER_ANDROID
 static THREADLOCAL AllocatorCache cache;
+#endif // !SANITIZER_ANDROID
+
+
+AllocatorCache* getAllocatorCache(void) {
+#if SANITIZER_ANDROID
+  return &(LsanTLSSlotBigStruct->cache);
+#else
+  return &cache;
+#endif // SANITIZER_ANDROID
+}
+
+
+#if SANITIZER_ANDROID
+void EnsureTLSSlotInit(void) {
+  if (LsanTLSSlotBigStruct == nullptr) {
+    struct LsanTLSSlotBig* tlsSlotBigStruct
+        = (struct LsanTLSSlotBig*) MmapOrDie (sizeof (struct LsanTLSSlotBig), "tlsSlotBigStruct", false);
+    Printf ("%d calloc'ed tls slot big struct %p (size %d)\n",
+            GetTid(), tlsSlotBigStruct, sizeof (struct LsanTLSSlotBig));
+
+    unsigned long i;
+    for (i = 0; i < sizeof (struct LsanTLSSlotBig); i++) {
+      ((char*) tlsSlotBigStruct) [i] = 0;
+    }
+
+    tlsSlotBigStruct->disable_counter = 0;  // Redundant
+    tlsSlotBigStruct->current_thread_tid = -1;  // kInvalidTid;
+
+    __get_tls()[TLS_SLOT_TSAN] = tlsSlotBigStruct;
+  }
+}
+#endif // SANITIZER_ANDROID
+
 void InitializeAllocator() {
   allocator.InitLinkerInitialized(common_flags()->allocator_may_return_null);
 }
 
 void AllocatorThreadFinish() {
-  allocator.SwallowCache(&cache);
+  allocator.SwallowCache(getAllocatorCache());
 }
 
 static ChunkMetadata *Metadata(const void *p) {
@@ -93,7 +100,7 @@
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", size);
     return nullptr;
   }
-  void *p = allocator.Allocate(&cache, size, alignment, false);
+  void *p = allocator.Allocate(getAllocatorCache(), size, alignment, false);
   // Do not rely on the allocator to clear the memory (it's slow).
   if (cleared && allocator.FromPrimary(p))
     memset(p, 0, size);
@@ -107,7 +114,7 @@
   if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
   RunFreeHooks(p);
   RegisterDeallocation(p);
-  allocator.Deallocate(&cache, p);
+  allocator.Deallocate(getAllocatorCache(), p);
 }
 
 void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
@@ -115,17 +122,17 @@
   RegisterDeallocation(p);
   if (new_size > kMaxAllowedMallocSize) {
     Report("WARNING: LeakSanitizer failed to allocate %zu bytes\n", new_size);
-    allocator.Deallocate(&cache, p);
+    allocator.Deallocate(getAllocatorCache(), p);
     return nullptr;
   }
-  p = allocator.Reallocate(&cache, p, new_size, alignment);
+  p = allocator.Reallocate(getAllocatorCache(), p, new_size, alignment);
   RegisterAllocation(stack, p, new_size);
   return p;
 }
 
 void GetAllocatorCacheRange(uptr *begin, uptr *end) {
-  *begin = (uptr)&cache;
-  *end = *begin + sizeof(cache);
+  *begin = (uptr)getAllocatorCache();
+  *end = *begin + sizeof(*(getAllocatorCache()));
 }
 
 uptr GetMallocUsableSize(const void *p) {
Index: lsan_common.h
===================================================================
--- lsan_common.h
+++ lsan_common.h
@@ -22,7 +22,7 @@
 #include "sanitizer_common/sanitizer_stoptheworld.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 
-#if (SANITIZER_LINUX && !SANITIZER_ANDROID) && (SANITIZER_WORDSIZE == 64) \
+#if (SANITIZER_LINUX) && (SANITIZER_WORDSIZE == 64) \
     && (defined(__x86_64__) || defined(__mips64) || defined(__aarch64__))
 #define CAN_SANITIZE_LEAKS 1
 #else
Index: lsan_common.cc
===================================================================
--- lsan_common.cc
+++ lsan_common.cc
@@ -14,6 +14,8 @@
 
 #include "lsan_common.h"
 
+#include "lsan_allocator.h"
+
 #include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_flags.h"
 #include "sanitizer_common/sanitizer_flag_parser.h"
@@ -32,17 +34,31 @@
 // also to protect the global list of root regions.
 BlockingMutex global_mutex(LINKER_INITIALIZED);
 
+#if !SANITIZER_ANDROID
 THREADLOCAL int disable_counter;
-bool DisabledInThisThread() { return disable_counter > 0; }
-void DisableInThisThread() { disable_counter++; }
+#endif // !SANITIZER_ANDROID
+
+
+int* getDisableCounter(void) {
+#if SANITIZER_ANDROID
+  return &(LsanTLSSlotBigStruct->disable_counter);
+#else
+  return &disable_counter;
+#endif // SANITIZER_ANDROID
+}
+
+
+bool DisabledInThisThread() { return *(getDisableCounter()) > 0; }
+void DisableInThisThread() { (*(getDisableCounter()))++; }
 void EnableInThisThread() {
-  if (!disable_counter && common_flags()->detect_leaks) {
+  if (!(*(getDisableCounter())) && common_flags()->detect_leaks) {
     Report("Unmatched call to __lsan_enable().\n");
     Die();
   }
-  disable_counter--;
+  (*(getDisableCounter()))--;
 }
+
 Flags lsan_flags;
 
 void Flags::SetDefaults() {
Index: lsan_flags.inc
===================================================================
--- lsan_flags.inc
+++ lsan_flags.inc
@@ -30,8 +30,13 @@
           "Root set: include global variables (.data and .bss)")
 LSAN_FLAG(bool, use_stacks, true, "Root set: include thread stacks")
 LSAN_FLAG(bool, use_registers, true, "Root set: include thread registers")
+#if SANITIZER_ANDROID
+LSAN_FLAG(bool, use_tls, false,
+          "Root set: include TLS and thread-specific storage")
+#else
 LSAN_FLAG(bool, use_tls, true,
           "Root set: include TLS and thread-specific storage")
+#endif // SANITIZER_ANDROID
 LSAN_FLAG(bool, use_root_regions, true,
           "Root set: include regions added via __lsan_register_root_region().")
 LSAN_FLAG(bool, use_ld_allocations, true,
Index: lsan_thread.cc
===================================================================
--- lsan_thread.cc
+++ lsan_thread.cc
@@ -25,7 +25,9 @@
 const u32 kInvalidTid = (u32) -1;
 
 static ThreadRegistry *thread_registry;
+#if !SANITIZER_ANDROID
 static THREADLOCAL u32 current_thread_tid = kInvalidTid;
+#endif // !SANITIZER_ANDROID
 
 static ThreadContextBase *CreateThreadContext(u32 tid) {
   void *mem = MmapOrDie(sizeof(ThreadContext), "ThreadContext");
@@ -41,12 +43,25 @@
       ThreadRegistry(CreateThreadContext, kMaxThreads, kThreadQuarantineSize);
 }
 
+
+u32* getCurrentThreadTID(void) {
+#if SANITIZER_ANDROID
+  return &(LsanTLSSlotBigStruct->current_thread_tid);
+#else
+  return &current_thread_tid;
+#endif // SANITIZER_ANDROID
+}
+
 u32 GetCurrentThread() {
-  return current_thread_tid;
+  return *(getCurrentThreadTID());
 }
 
 void SetCurrentThread(u32 tid) {
-  current_thread_tid = tid;
+#if SANITIZER_ANDROID
+  EnsureTLSSlotInit();
+#endif // SANITIZER_ANDROID
+
+  *(getCurrentThreadTID()) = tid;
 }
 
 ThreadContext::ThreadContext(int tid)
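
Note for reviewers unfamiliar with the TLS-slot trick the patch leans on: on Android the runtime's THREADLOCAL per-thread state (allocator cache, disable counter, current TID) is replaced by a struct whose address is stashed in a raw slot of the thread control block, the same TLS_SLOT_TSAN = 8 slot the TSan runtime uses on Bionic. The standalone sketch below is not part of the patch; it only illustrates the mechanism. The names DemoState, kDemoSlot, demo_get_tls and the main() driver are hypothetical, and the program is only meaningful on an Android/Bionic target where slot 8 is reserved for sanitizer runtimes (on glibc the same index would land inside libc's own TCB).

// Hypothetical standalone sketch (not part of the patch): lazily install a
// per-thread struct into a raw Bionic TLS slot and read it back, the same
// pattern EnsureTLSSlotInit()/LsanTLSSlotBigStruct use for LsanTLSSlotBig.
// Build for an Android/Bionic target; slot 8 is only reserved there.
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#if defined(__aarch64__)
# define demo_get_tls() \
    ({ void** __val; __asm__("mrs %0, tpidr_el0" : "=r"(__val)); __val; })
#elif defined(__x86_64__)
# define demo_get_tls() \
    ({ void** __val; __asm__("mov %%fs:0, %0" : "=r"(__val)); __val; })
#else
# error unsupported architecture
#endif

static const int kDemoSlot = 8;  // TLS_SLOT_TSAN in the patch.

struct DemoState {               // Stand-in for LsanTLSSlotBig.
  int disable_counter;
  unsigned current_thread_tid;
};

// First touch on a thread: allocate the struct and drop its address into the
// slot; later calls just read the pointer back out of this thread's TCB.
static DemoState *DemoEnsureState() {
  DemoState *s = (DemoState *)demo_get_tls()[kDemoSlot];
  if (s == nullptr) {
    s = (DemoState *)calloc(1, sizeof(DemoState));
    s->current_thread_tid = (unsigned)-1;  // kInvalidTid equivalent.
    demo_get_tls()[kDemoSlot] = s;
  }
  return s;
}

static void *Worker(void *arg) {
  // Each thread sees its own DemoState because the slot lives in its TCB.
  DemoEnsureState()->current_thread_tid = (unsigned)(uintptr_t)arg;
  printf("worker tid slot = %u\n", DemoEnsureState()->current_thread_tid);
  return nullptr;
}

int main() {
  DemoEnsureState()->current_thread_tid = 0;
  pthread_t t;
  pthread_create(&t, nullptr, Worker, (void *)1);
  pthread_join(t, nullptr);
  printf("main tid slot = %u\n", DemoEnsureState()->current_thread_tid);
  return 0;
}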