Index: lib/tsan/rtl/tsan_platform.h
===================================================================
--- lib/tsan/rtl/tsan_platform.h
+++ lib/tsan/rtl/tsan_platform.h
@@ -169,6 +169,40 @@
   static const uptr kVdsoBeg       = 0x37f00000000ull;
 };
 
+/*
+C/C++ on linux/aarch64 (48-bit VMA)
+0 0000 0000 1000 - 0 0002 0000 0000: main binary
+0 0002 0000 0000 - 0 0020 0000 0000: -
+0 0020 0000 0000 - 0 0040 0000 0000: shadow memory
+0 0040 0000 0000 - 0 0050 0000 0000: -
+0 0050 0000 0000 - 0 0060 0000 0000: metainfo
+0 0060 0000 0000 - 0 aaaa 0000 0000: -
+0 aaaa 0000 0000 - 0 aaaf 0000 0000: main binary (PIE)
+0 aaaf 0000 0000 - 0 f060 0000 0000: -
+0 f060 0000 0000 - 0 f062 0000 0000: traces
+0 ffff 0000 0000 - 1 0000 0000 0000: modules and main thread stack
+*/
+struct Mapping48 {
+  static const uptr kLoAppMemBeg   = 0x0000000001000ull;
+  static const uptr kLoAppMemEnd   = 0x0000200000000ull;
+  static const uptr kShadowBeg     = 0x0002000000000ull;
+  static const uptr kShadowEnd     = 0x0004000000000ull;
+  static const uptr kMetaShadowBeg = 0x0005000000000ull;
+  static const uptr kMetaShadowEnd = 0x0006000000000ull;
+  static const uptr kMidAppMemBeg  = 0x0aaaa00000000ull;
+  static const uptr kMidAppMemEnd  = 0x0aaaf00000000ull;
+  static const uptr kMidShadowOff  = 0x0aaa800000000ull;
+  static const uptr kTraceMemBeg   = 0x0f06000000000ull;
+  static const uptr kTraceMemEnd   = 0x0f06200000000ull;
+  static const uptr kHeapMemBeg    = 0x0ffff00000000ull;
+  static const uptr kHeapMemEnd    = 0x0ffff00000000ull;
+  static const uptr kHiAppMemBeg   = 0x0ffff00001000ull;
+  static const uptr kHiAppMemEnd   = 0x1000000000000ull;
+  static const uptr kAppMemMsk     = 0x0fff800000000ull;
+  static const uptr kAppMemXor     = 0x0000800000000ull;
+  static const uptr kVdsoBeg       = 0xffff000000000ull;
+};
+
 // Indicates the runtime will define the memory regions at runtime.
 #define TSAN_RUNTIME_VMA 1
 // Indicates that mapping defines a mid range memory segment.
@@ -362,11 +396,13 @@
 template<typename Mapping, int Type>
 uptr MappingArchImpl(void) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MappingImpl<Mapping39, Type>();
-  else
-    return MappingImpl<Mapping42, Type>();
+  switch (vmaSize) {
+    case 39: return MappingImpl<Mapping39, Type>();
+    case 42: return MappingImpl<Mapping42, Type>();
+    case 48: return MappingImpl<Mapping48, Type>();
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MappingImpl<Mapping44, Type>();
@@ -449,16 +485,21 @@
     *start = HiAppMemBeg();
     *end = HiAppMemEnd();
     return true;
-  case 2:
-    *start = HeapMemBeg();
-    *end = HeapMemEnd();
-    return true;
 # ifdef TSAN_MID_APP_RANGE
-  case 3:
+  case 2:
     *start = MidAppMemBeg();
     *end = MidAppMemEnd();
     return true;
 # endif
+// The heap region is used in tsan exclusively for SizeClassAllocator64, so
+// architectures that do not use it may simply not define it as a user
+// region.
+# if SANITIZER_CAN_USE_ALLOCATOR64
+  case 3:
+    *start = HeapMemBeg();
+    *end = HeapMemEnd();
+    return true;
+# endif
 #else
   case 0:
     *start = AppMemBeg();
@@ -513,11 +554,13 @@
 ALWAYS_INLINE
 bool IsAppMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsAppMemImpl<Mapping39>(mem);
-  else
-    return IsAppMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsAppMemImpl<Mapping39>(mem);
+    case 42: return IsAppMemImpl<Mapping42>(mem);
+    case 48: return IsAppMemImpl<Mapping48>(mem);
+  }
   DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsAppMemImpl<Mapping44>(mem);
@@ -538,11 +581,13 @@
 ALWAYS_INLINE
 bool IsShadowMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsShadowMemImpl<Mapping39>(mem);
-  else
-    return IsShadowMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsShadowMemImpl<Mapping39>(mem);
+    case 42: return IsShadowMemImpl<Mapping42>(mem);
+    case 48: return IsShadowMemImpl<Mapping48>(mem);
+  }
   DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsShadowMemImpl<Mapping44>(mem);
@@ -563,11 +608,13 @@
 ALWAYS_INLINE
 bool IsMetaMem(uptr mem) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return IsMetaMemImpl<Mapping39>(mem);
-  else
-    return IsMetaMemImpl<Mapping42>(mem);
+  switch (vmaSize) {
+    case 39: return IsMetaMemImpl<Mapping39>(mem);
+    case 42: return IsMetaMemImpl<Mapping42>(mem);
+    case 48: return IsMetaMemImpl<Mapping48>(mem);
+  }
   DCHECK(0);
+  return false;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return IsMetaMemImpl<Mapping44>(mem);
@@ -598,11 +645,13 @@
 ALWAYS_INLINE
 uptr MemToShadow(uptr x) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MemToShadowImpl<Mapping39>(x);
-  else
-    return MemToShadowImpl<Mapping42>(x);
+  switch (vmaSize) {
+    case 39: return MemToShadowImpl<Mapping39>(x);
+    case 42: return MemToShadowImpl<Mapping42>(x);
+    case 48: return MemToShadowImpl<Mapping48>(x);
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MemToShadowImpl<Mapping44>(x);
@@ -631,11 +680,13 @@
 ALWAYS_INLINE
 u32 *MemToMeta(uptr x) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return MemToMetaImpl<Mapping39>(x);
-  else
-    return MemToMetaImpl<Mapping42>(x);
+  switch (vmaSize) {
+    case 39: return MemToMetaImpl<Mapping39>(x);
+    case 42: return MemToMetaImpl<Mapping42>(x);
+    case 48: return MemToMetaImpl<Mapping48>(x);
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return MemToMetaImpl<Mapping44>(x);
@@ -674,11 +725,13 @@
 ALWAYS_INLINE
 uptr ShadowToMem(uptr s) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return ShadowToMemImpl<Mapping39>(s);
-  else
-    return ShadowToMemImpl<Mapping42>(s);
+  switch (vmaSize) {
+    case 39: return ShadowToMemImpl<Mapping39>(s);
+    case 42: return ShadowToMemImpl<Mapping42>(s);
+    case 48: return ShadowToMemImpl<Mapping48>(s);
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return ShadowToMemImpl<Mapping44>(s);
@@ -707,11 +760,13 @@
 ALWAYS_INLINE
 uptr GetThreadTrace(int tid) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return GetThreadTraceImpl<Mapping39>(tid);
-  else
-    return GetThreadTraceImpl<Mapping42>(tid);
+  switch (vmaSize) {
+    case 39: return GetThreadTraceImpl<Mapping39>(tid);
+    case 42: return GetThreadTraceImpl<Mapping42>(tid);
+    case 48: return GetThreadTraceImpl<Mapping48>(tid);
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return GetThreadTraceImpl<Mapping44>(tid);
@@ -735,11 +790,13 @@
 ALWAYS_INLINE
 uptr GetThreadTraceHeader(int tid) {
 #ifdef __aarch64__
-  if (vmaSize == 39)
-    return GetThreadTraceHeaderImpl<Mapping39>(tid);
-  else
-    return GetThreadTraceHeaderImpl<Mapping42>(tid);
+  switch (vmaSize) {
+    case 39: return GetThreadTraceHeaderImpl<Mapping39>(tid);
+    case 42: return GetThreadTraceHeaderImpl<Mapping42>(tid);
+    case 48: return GetThreadTraceHeaderImpl<Mapping48>(tid);
+  }
   DCHECK(0);
+  return 0;
 #elif defined(__powerpc64__)
   if (vmaSize == 44)
     return GetThreadTraceHeaderImpl<Mapping44>(tid);
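
The Mapping48 constants keep tsan's usual direct shadow scheme: an application
address is masked to drop the region-selector bits and the in-cell offset,
xored into the shadow window, and scaled. The following is a minimal standalone
sketch of that arithmetic, not part of the patch; MemToShadow48 is a
hypothetical stand-in for MemToShadowImpl<Mapping48>, and the kShadowCell /
kShadowCnt values (8-byte application cells, 4 shadow slots per cell) are taken
from tsan_defs.h.

#include <cstdio>

typedef unsigned long long uptr64;

static const uptr64 kShadowCell = 8;
static const uptr64 kShadowCnt  = 4;
static const uptr64 kAppMemMsk  = 0x0fff800000000ull;
static const uptr64 kAppMemXor  = 0x0000800000000ull;

// Mask off the region-selector bits and the in-cell offset, fold the address
// into the shadow window with the xor, then scale by the slots-per-cell count.
static uptr64 MemToShadow48(uptr64 x) {
  return ((x & ~(kAppMemMsk | (kShadowCell - 1))) ^ kAppMemXor) * kShadowCnt;
}

int main() {
  // One probe address from each application range of the 48-bit layout; all
  // three land inside [kShadowBeg, kShadowEnd) = [0x0002000000000, 0x0004000000000).
  const uptr64 probes[] = { 0x0000000002000ull,    // low app (main binary)
                            0x0aaaa00002000ull,    // mid app (PIE)
                            0x0ffff00002000ull };  // high app (modules, stack)
  for (uptr64 p : probes)
    printf("app %013llx -> shadow %013llx\n", p, MemToShadow48(p));
  return 0;
}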
Index: lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_linux.cc
+++ lib/tsan/rtl/tsan_platform_linux.cc
@@ -94,8 +94,10 @@
   else if (p >= MetaShadowBeg() && p < MetaShadowEnd())
     mem[MemMeta] += rss;
 #ifndef SANITIZER_GO
+# if SANITIZER_CAN_USE_ALLOCATOR64
   else if (p >= HeapMemBeg() && p < HeapMemEnd())
     mem[MemHeap] += rss;
+# endif
   else if (p >= LoAppMemBeg() && p < LoAppMemEnd())
     mem[file ? MemFile : MemMmap] += rss;
   else if (p >= HiAppMemBeg() && p < HiAppMemEnd())
@@ -208,9 +210,9 @@
   vmaSize =
     (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
 #if defined(__aarch64__)
-  if (vmaSize != 39 && vmaSize != 42) {
+  if (vmaSize != 39 && vmaSize != 42 && vmaSize != 48) {
     Printf("FATAL: ThreadSanitizer: unsupported VMA range\n");
-    Printf("FATAL: Found %d - Supported 39 and 42\n", vmaSize);
+    Printf("FATAL: Found %d - Supported 39, 42 and 48\n", vmaSize);
     Die();
   }
 #elif defined(__powerpc64__)
Index: lib/tsan/rtl/tsan_platform_posix.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_posix.cc
+++ lib/tsan/rtl/tsan_platform_posix.cc
@@ -141,7 +141,10 @@
   // Memory for traces is mapped lazily in MapThreadTrace.
   // Protect the whole range for now, so that user does not map something here.
   ProtectRange(TraceMemBeg(), TraceMemEnd());
+  // The heap region is used in tsan exclusively for SizeClassAllocator64.
+#if SANITIZER_CAN_USE_ALLOCATOR64
   ProtectRange(TraceMemEnd(), HeapMemBeg());
+#endif
   ProtectRange(HeapEnd(), HiAppMemBeg());
 }
 #endif
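
The vmaSize value tested above comes from an early probe of the address-space
size: the main thread's stack sits near the top of the userspace VMA, so the
index of the most significant set bit of a current frame address, plus one,
approximates the number of usable address bits. A standalone sketch of that
probe, with __builtin_frame_address and __builtin_clzll standing in for
GET_CURRENT_FRAME() and MostSignificantSetBitIndex() (an assumption for
illustration, not the runtime's actual code path):

#include <cstdint>
#include <cstdio>

// MSB index is (63 - clz), so the VMA size estimate is (64 - clz).
static int ProbeVmaSize() {
  uintptr_t frame = (uintptr_t)__builtin_frame_address(0);
  return 64 - __builtin_clzll(frame);
}

int main() {
  int vmaSize = ProbeVmaSize();
  printf("detected VMA size: %d-bit\n", vmaSize);
  // With this patch the aarch64 runtime accepts 39, 42 and 48 and dies on
  // anything else.
  return !(vmaSize == 39 || vmaSize == 42 || vmaSize == 48);
}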
Index: lib/tsan/rtl/tsan_sync.h
===================================================================
--- lib/tsan/rtl/tsan_sync.h
+++ lib/tsan/rtl/tsan_sync.h
@@ -51,15 +51,15 @@
 
   u64 GetId() const {
-    // 47 lsb is addr, then 14 bits is low part of uid, then 3 zero bits.
-    return GetLsb((u64)addr | (uid << 47), 61);
+    // 48 lsb is addr, then 14 bits is low part of uid, then 2 zero bits.
+    return GetLsb((u64)addr | (uid << 48), 60);
   }
   bool CheckId(u64 uid) const {
     CHECK_EQ(uid, GetLsb(uid, 14));
     return GetLsb(this->uid, 14) == uid;
   }
   static uptr SplitId(u64 id, u64 *uid) {
-    *uid = id >> 47;
-    return (uptr)GetLsb(id, 47);
+    *uid = id >> 48;
+    return (uptr)GetLsb(id, 48);
   }
 };
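
To make the widened bit layout concrete, here is a minimal standalone
round-trip sketch, not part of the patch. PackId and UnpackId are hypothetical
stand-ins for SyncVar::GetId and SplitId, and GetLsb is spelled out with the
semantics it has in tsan_defs.h (keep the n least significant bits). Note that
a packed id now holds 60 significant bits, so SplitId recovers only the low
12 bits of the uid, while CheckId compares the low 14 bits of the live uid.

#include <cstdint>
#include <cstdio>

typedef unsigned long long u64;
typedef uintptr_t uptr;

// Keep the n least significant bits, as GetLsb does in tsan_defs.h.
static u64 GetLsb(u64 v, int bits) {
  return v & ((1ull << bits) - 1);
}

// Low 48 bits carry the address (the new aarch64 VMA limit); the bits above
// carry the low part of the uid; the id is truncated to 60 significant bits.
static u64 PackId(uptr addr, u64 uid) {
  return GetLsb((u64)addr | (uid << 48), 60);
}

static uptr UnpackId(u64 id, u64 *uid) {
  *uid = id >> 48;
  return (uptr)GetLsb(id, 48);
}

int main() {
  uptr addr = 0x0000aaaabbbbc000ull;  // any address below 2^48
  u64 uid = 0x123;
  u64 uid2;
  uptr addr2 = UnpackId(PackId(addr, uid), &uid2);
  // The address round-trips exactly; 60 - 48 = 12 low bits of the uid survive.
  printf("addr ok: %d  uid bits recovered: %d\n",
         addr2 == addr, uid2 == GetLsb(uid, 12));
  return 0;
}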