Index: lib/tsan/rtl/tsan_platform_linux.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_linux.cc
+++ lib/tsan/rtl/tsan_platform_linux.cc
@@ -132,17 +132,6 @@
 }
 
 #ifndef SANITIZER_GO
-static void ProtectRange(uptr beg, uptr end) {
-  CHECK_LE(beg, end);
-  if (beg == end)
-    return;
-  if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
-    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
-    Printf("FATAL: Make sure you are not using unlimited stack\n");
-    Die();
-  }
-}
-
 // Mark shadow for .rodata sections with the special kShadowRodata marker.
 // Accesses to .rodata can't race, so this saves time, memory and trace space.
 static void MapRodata() {
@@ -200,58 +189,7 @@
   internal_close(fd);
 }
 
-void InitializeShadowMemory() {
-  // Map memory shadow.
-  uptr shadow =
-      (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
-  if (shadow != kShadowBeg) {
-    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
-    Printf("FATAL: Make sure to compile with -fPIE and "
-           "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
-    Die();
-  }
-  // This memory range is used for thread stacks and large user mmaps.
-  // Frequently a thread uses only a small part of stack and similarly
-  // a program uses a small part of large mmap. On some programs
-  // we see 20% memory usage reduction without huge pages for this range.
-  // FIXME: don't use constants here.
-#if defined(__x86_64__)
-  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
-  const uptr kMadviseRangeSize = 0x010000000000ull;
-#elif defined(__mips64)
-  const uptr kMadviseRangeBeg  = 0xff00000000ull;
-  const uptr kMadviseRangeSize = 0x0100000000ull;
-#elif defined(__aarch64__)
-  const uptr kMadviseRangeBeg  = 0x7e00000000ull;
-  const uptr kMadviseRangeSize = 0x0100000000ull;
-#endif
-  NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
-                      kMadviseRangeSize * kShadowMultiplier);
-  // Meta shadow is compressing and we don't flush it,
-  // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
-  // On one program it reduces memory consumption from 5GB to 2.5GB.
-  NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
-  if (common_flags()->use_madv_dontdump)
-    DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
-  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
-      kShadowBeg, kShadowEnd,
-      (kShadowEnd - kShadowBeg) >> 30);
-
-  // Map meta shadow.
-  uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
-  uptr meta =
-      (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
-  if (meta != kMetaShadowBeg) {
-    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
-    Printf("FATAL: Make sure to compile with -fPIE and "
-           "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
-    Die();
-  }
-  if (common_flags()->use_madv_dontdump)
-    DontDumpShadowMemory(meta, meta_size);
-  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
-      meta, meta + meta_size, meta_size >> 30);
-
+void InitializeShadowMemoryPlatform() {
   MapRodata();
 }
 
@@ -295,31 +233,6 @@
   CHECK_LT((uptr)&g_data_start, g_data_end);
 }
 
-static void CheckAndProtect() {
-  // Ensure that the binary is indeed compiled with -pie.
-  MemoryMappingLayout proc_maps(true);
-  uptr p, end;
-  while (proc_maps.Next(&p, &end, 0, 0, 0, 0)) {
-    if (IsAppMem(p))
-      continue;
-    if (p >= kHeapMemEnd &&
-        p < HeapEnd())
-      continue;
-    if (p >= kVdsoBeg)  // vdso
-      break;
-    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
-    Die();
-  }
-
-  ProtectRange(kLoAppMemEnd, kShadowBeg);
-  ProtectRange(kShadowEnd, kMetaShadowBeg);
-  ProtectRange(kMetaShadowEnd, kTraceMemBeg);
-  // Memory for traces is mapped lazily in MapThreadTrace.
-  // Protect the whole range for now, so that user does not map something here.
-  ProtectRange(kTraceMemBeg, kTraceMemEnd);
-  ProtectRange(kTraceMemEnd, kHeapMemBeg);
-  ProtectRange(HeapEnd(), kHiAppMemBeg);
-}
 #endif  // #ifndef SANITIZER_GO
 
 void InitializePlatform() {
Index: lib/tsan/rtl/tsan_platform_mac.cc
===================================================================
--- lib/tsan/rtl/tsan_platform_mac.cc
+++ lib/tsan/rtl/tsan_platform_mac.cc
@@ -51,28 +51,14 @@
 }
 
 #ifndef SANITIZER_GO
-void InitializeShadowMemory() {
-  uptr shadow = (uptr)MmapFixedNoReserve(kShadowBeg,
-                                         kShadowEnd - kShadowBeg);
-  if (shadow != kShadowBeg) {
-    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
-    Printf("FATAL: Make sure to compile with -fPIE and "
-           "to link with -pie.\n");
-    Die();
-  }
-  if (common_flags()->use_madv_dontdump)
-    DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
-  DPrintf("kShadow %zx-%zx (%zuGB)\n",
-      kShadowBeg, kShadowEnd,
-      (kShadowEnd - kShadowBeg) >> 30);
-  DPrintf("kAppMem %zx-%zx (%zuGB)\n",
-      kAppMemBeg, kAppMemEnd,
-      (kAppMemEnd - kAppMemBeg) >> 30);
-}
+void InitializeShadowMemoryPlatform() { }
 #endif
 
 void InitializePlatform() {
   DisableCoreDumperIfNecessary();
+#ifndef SANITIZER_GO
+  CheckAndProtect();
+#endif
 }
 
 #ifndef SANITIZER_GO
Index: lib/tsan/rtl/tsan_rtl.h
===================================================================
--- lib/tsan/rtl/tsan_rtl.h
+++ lib/tsan/rtl/tsan_rtl.h
@@ -579,6 +579,8 @@
 void MapThreadTrace(uptr addr, uptr size, const char *name);
 void DontNeedShadowFor(uptr addr, uptr size);
 void InitializeShadowMemory();
+void CheckAndProtect();
+void InitializeShadowMemoryPlatform();
 void InitializeInterceptors();
 void InitializeLibIgnore();
 void InitializeDynamicAnnotations();
Index: lib/tsan/rtl/tsan_rtl.cc
===================================================================
--- lib/tsan/rtl/tsan_rtl.cc
+++ lib/tsan/rtl/tsan_rtl.cc
@@ -17,6 +17,7 @@
 #include "sanitizer_common/sanitizer_libc.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
 #include "sanitizer_common/sanitizer_placement_new.h"
+#include "sanitizer_common/sanitizer_procmaps.h"
 #include "sanitizer_common/sanitizer_symbolizer.h"
 #include "tsan_defs.h"
 #include "tsan_platform.h"
@@ -306,6 +307,100 @@
   }
 }
 
+#ifndef SANITIZER_GO
+void InitializeShadowMemory() {
+  // Map memory shadow.
+  uptr shadow =
+      (uptr)MmapFixedNoReserve(kShadowBeg, kShadowEnd - kShadowBeg, "shadow");
+  if (shadow != kShadowBeg) {
+    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+    Printf("FATAL: Make sure to compile with -fPIE and "
+           "to link with -pie (%p, %p).\n", shadow, kShadowBeg);
+    Die();
+  }
+  // This memory range is used for thread stacks and large user mmaps.
+  // Frequently a thread uses only a small part of stack and similarly
+  // a program uses a small part of large mmap. On some programs
+  // we see 20% memory usage reduction without huge pages for this range.
+  // FIXME: don't use constants here.
+#if defined(__x86_64__)
+  const uptr kMadviseRangeBeg  = 0x7f0000000000ull;
+  const uptr kMadviseRangeSize = 0x010000000000ull;
+#elif defined(__mips64)
+  const uptr kMadviseRangeBeg  = 0xff00000000ull;
+  const uptr kMadviseRangeSize = 0x0100000000ull;
+#elif defined(__aarch64__)
+  const uptr kMadviseRangeBeg  = 0x7e00000000ull;
+  const uptr kMadviseRangeSize = 0x0100000000ull;
+#endif
+  NoHugePagesInRegion(MemToShadow(kMadviseRangeBeg),
+                      kMadviseRangeSize * kShadowMultiplier);
+  // Meta shadow is compressing and we don't flush it,
+  // so it makes sense to mark it as NOHUGEPAGE to not over-allocate memory.
+  // On one program it reduces memory consumption from 5GB to 2.5GB.
+  NoHugePagesInRegion(kMetaShadowBeg, kMetaShadowEnd - kMetaShadowBeg);
+  if (common_flags()->use_madv_dontdump)
+    DontDumpShadowMemory(kShadowBeg, kShadowEnd - kShadowBeg);
+  DPrintf("memory shadow: %zx-%zx (%zuGB)\n",
+      kShadowBeg, kShadowEnd,
+      (kShadowEnd - kShadowBeg) >> 30);
+
+  // Map meta shadow.
+  uptr meta_size = kMetaShadowEnd - kMetaShadowBeg;
+  uptr meta =
+      (uptr)MmapFixedNoReserve(kMetaShadowBeg, meta_size, "meta shadow");
+  if (meta != kMetaShadowBeg) {
+    Printf("FATAL: ThreadSanitizer can not mmap the shadow memory\n");
+    Printf("FATAL: Make sure to compile with -fPIE and "
+           "to link with -pie (%p, %p).\n", meta, kMetaShadowBeg);
+    Die();
+  }
+  if (common_flags()->use_madv_dontdump)
+    DontDumpShadowMemory(meta, meta_size);
+  DPrintf("meta shadow: %zx-%zx (%zuGB)\n",
+      meta, meta + meta_size, meta_size >> 30);
+}
+
+static void ProtectRange(uptr beg, uptr end) {
+  CHECK_LE(beg, end);
+  if (beg == end)
+    return;
+  if (beg != (uptr)MmapNoAccess(beg, end - beg)) {
+    Printf("FATAL: ThreadSanitizer can not protect [%zx,%zx]\n", beg, end);
+    Printf("FATAL: Make sure you are not using unlimited stack\n");
+    Die();
+  }
+}
+
+void CheckAndProtect() {
+  // Ensure that the binary is indeed compiled with -pie.
+  MemoryMappingLayout proc_maps(true);
+  uptr p, end, prot;
+  while (proc_maps.Next(&p, &end, 0, 0, 0, &prot)) {
+    if (IsAppMem(p))
+      continue;
+    if (p >= kHeapMemEnd &&
+        p < HeapEnd())
+      continue;
+    if (prot == 0)  // Zero page or mprotected.
+      continue;
+    if (p >= kVdsoBeg)  // vdso
+      break;
+    Printf("FATAL: ThreadSanitizer: unexpected memory mapping %p-%p\n", p, end);
+    Die();
+  }
+
+  ProtectRange(kLoAppMemEnd, kShadowBeg);
+  ProtectRange(kShadowEnd, kMetaShadowBeg);
+  ProtectRange(kMetaShadowEnd, kTraceMemBeg);
+  // Memory for traces is mapped lazily in MapThreadTrace.
+  // Protect the whole range for now, so that user does not map something here.
+  ProtectRange(kTraceMemBeg, kTraceMemEnd);
+  ProtectRange(kTraceMemEnd, kHeapMemBeg);
+  ProtectRange(HeapEnd(), kHiAppMemBeg);
+}
+#endif
+
 void Initialize(ThreadState *thr) {
   // Thread safe because done before all threads exist.
   static bool is_initialized = false;
@@ -333,6 +428,7 @@
   InitializeDynamicAnnotations();
 #ifndef SANITIZER_GO
   InitializeShadowMemory();
+  InitializeShadowMemoryPlatform();
 #endif
   // Setup correct file descriptor for error reports.
   __sanitizer_set_report_path(common_flags()->log_path);