diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -120,6 +120,11 @@
 void MprotectMallocZones(void *addr, int prot);
 
+#if SANITIZER_WINDOWS
+// Zero previously mmap'd memory. Currently used only on Windows.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT;
+#endif
+
 #if SANITIZER_LINUX
 // Unmap memory. Currently only used on Linux.
 void UnmapFromTo(uptr from, uptr to);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -234,6 +234,17 @@
   return (void *)mapped_addr;
 }
 
+// ZeroMmapFixedRegion zeroes out a region of memory previously returned from
+// a call to one of the MmapFixed* helpers. On non-Windows systems this would
+// be done with another mmap, but on Windows remapping is not an option.
+// VirtualFree(DECOMMIT)+VirtualAlloc(RECOMMIT) would also be a way to zero
+// the memory, but we can't do this atomically, so instead we fall back to
+// using internal_memset.
+bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) {
+  internal_memset((void *)fixed_addr, 0, size);
+  return true;
+}
+
 bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name) {
   // FIXME: is this really "NoReserve"? On Win32 this does not matter much,
   // but on Win64 it does.
diff --git a/compiler-rt/lib/tsan/go/build.bat b/compiler-rt/lib/tsan/go/build.bat
--- a/compiler-rt/lib/tsan/go/build.bat
+++ b/compiler-rt/lib/tsan/go/build.bat
@@ -57,6 +57,7 @@
 -Wno-format ^
 -Wno-maybe-uninitialized ^
 -DSANITIZER_DEBUG=0 ^
+-DSANITIZER_WINDOWS=1 ^
 -O3 ^
 -fomit-frame-pointer ^
 -msse3 ^
diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h
--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -394,6 +394,7 @@
 0300 0000 0000 - 0700 0000 0000: -
 0700 0000 0000 - 0770 0000 0000: metainfo (memory blocks and sync objects)
 07d0 0000 0000 - 8000 0000 0000: -
+PIE binaries are currently not supported, but should be theoretically possible.
 */
 
 struct MappingGoWindows {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h
@@ -372,6 +372,10 @@
   uptr trace_part_total_allocated SANITIZER_GUARDED_BY(slot_mtx);
   uptr trace_part_recycle_finished SANITIZER_GUARDED_BY(slot_mtx);
   uptr trace_part_finished_excess SANITIZER_GUARDED_BY(slot_mtx);
+#if SANITIZER_GO
+  uptr mapped_shadow_begin;
+  uptr mapped_shadow_end;
+#endif
 };
 
 extern Context *ctx;  // The one and the only global runtime context.
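The ZeroMmapFixedRegion comment above is the core platform difference in this
patch: POSIX can atomically replace an existing fixed mapping with fresh zero
pages, Windows cannot. Below is a minimal standalone sketch of that contrast;
zero_region is a hypothetical helper written for illustration, not the
runtime's API.

#include <cstddef>
#include <cstring>
#if !defined(_WIN32)
#  include <sys/mman.h>
#endif

// Hypothetical illustration of the two zeroing strategies discussed in the
// patch comment; not sanitizer runtime code.
bool zero_region(void *addr, std::size_t size) {
#if defined(_WIN32)
  // VirtualFree(MEM_DECOMMIT) followed by VirtualAlloc(MEM_COMMIT) would also
  // yield zero pages, but the range is briefly decommitted between the two
  // calls, so the swap is not atomic. memset keeps the mapping intact.
  std::memset(addr, 0, size);
  return true;
#else
  // MAP_FIXED atomically replaces the pages already mapped at addr, and
  // anonymous mappings are zero-filled by the kernel, so a single mmap both
  // discards the old contents and produces a zeroed region.
  void *p = mmap(addr, size, PROT_READ | PROT_WRITE,
                 MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
  return p == addr;
#endif
}

The WARN_UNUSED_RESULT on the declaration keeps call sites checking the
result, matching the other MmapFixed* helpers, even though the memset path
cannot currently fail.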
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -197,8 +197,24 @@
   }
   DPrintf("Resetting shadow...\n");
-  if (!MmapFixedSuperNoReserve(ShadowBeg(), ShadowEnd() - ShadowBeg(),
-                               "shadow")) {
+  auto shadow_begin = ShadowBeg();
+  auto shadow_end = ShadowEnd();
+#if SANITIZER_GO
+  CHECK_NE(0, ctx->mapped_shadow_begin);
+  shadow_begin = ctx->mapped_shadow_begin;
+  shadow_end = ctx->mapped_shadow_end;
+  VPrintf(2, "shadow_begin-shadow_end: (0x%zx-0x%zx)\n",
+          shadow_begin, shadow_end);
+#endif
+
+#if SANITIZER_WINDOWS
+  auto resetFailed =
+      !ZeroMmapFixedRegion(shadow_begin, shadow_end - shadow_begin);
+#else
+  auto resetFailed =
+      !MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow");
+#endif
+  if (resetFailed) {
     Printf("failed to reset shadow memory\n");
     Die();
   }
@@ -557,18 +573,50 @@
 #endif
 
 void MapShadow(uptr addr, uptr size) {
+  // Ensure the thread registry lock is held, so as to synchronize
+  // with DoReset, which also accesses the mapped_shadow_* ctx fields.
+  ThreadRegistryLock lock0(&ctx->thread_registry);
+  static bool data_mapped = false;
+
+#if !SANITIZER_GO
   // Global data is not 64K aligned, but there are no adjacent mappings,
   // so we can get away with unaligned mapping.
   // CHECK_EQ(addr, addr & ~((64 << 10) - 1));  // windows wants 64K alignment
   const uptr kPageSize = GetPageSizeCached();
   uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), kPageSize);
   uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), kPageSize);
-  if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
-                               "shadow"))
+  if (!MmapFixedNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
     Die();
+#else
+  uptr shadow_begin = RoundDownTo((uptr)MemToShadow(addr), (64 << 10));
+  uptr shadow_end = RoundUpTo((uptr)MemToShadow(addr + size), (64 << 10));
+  VPrintf(2, "MapShadow for (0x%zx-0x%zx), begin/end: (0x%zx-0x%zx)\n",
+          addr, addr + size, shadow_begin, shadow_end);
+
+  if (!data_mapped) {
+    // First call maps data+bss.
+    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin, "shadow"))
+      Die();
+  } else {
+    VPrintf(2, "ctx->mapped_shadow_{begin,end} = (0x%zx-0x%zx)\n",
+            ctx->mapped_shadow_begin, ctx->mapped_shadow_end);
+    // Second and subsequent calls map heap.
+    if (shadow_end <= ctx->mapped_shadow_end)
+      return;
+    if (ctx->mapped_shadow_begin < shadow_begin)
+      ctx->mapped_shadow_begin = shadow_begin;
+    if (shadow_begin < ctx->mapped_shadow_end)
+      shadow_begin = ctx->mapped_shadow_end;
+    VPrintf(2, "MapShadow begin/end = (0x%zx-0x%zx)\n",
+            shadow_begin, shadow_end);
+    if (!MmapFixedSuperNoReserve(shadow_begin, shadow_end - shadow_begin,
+                                 "shadow"))
+      Die();
+    ctx->mapped_shadow_end = shadow_end;
+  }
+#endif
 
   // Meta shadow is 2:1, so tread carefully.
-  static bool data_mapped = false;
   static uptr mapped_meta_end = 0;
   uptr meta_begin = (uptr)MemToMeta(addr);
   uptr meta_end = (uptr)MemToMeta(addr + size);
@@ -585,8 +633,7 @@
   // Windows wants 64K alignment.
   meta_begin = RoundDownTo(meta_begin, 64 << 10);
   meta_end = RoundUpTo(meta_end, 64 << 10);
-  if (meta_end <= mapped_meta_end)
-    return;
+  CHECK_GT(meta_end, mapped_meta_end);
   if (meta_begin < mapped_meta_end)
     meta_begin = mapped_meta_end;
   if (!MmapFixedSuperNoReserve(meta_begin, meta_end - meta_begin,
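The MapShadow changes boil down to interval bookkeeping: the Go runtime calls
MapShadow repeatedly as the heap grows, and the patch records the union of
already-mapped shadow in ctx->mapped_shadow_begin/end so that each call maps
only the uncovered suffix and DoReset knows exactly which range to zero. Below
is a simplified standalone model of that bookkeeping, with map_pages standing
in for MmapFixedSuperNoReserve; the names are hypothetical and the logic is
slightly simplified from the patch.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Models ctx->mapped_shadow_begin / ctx->mapped_shadow_end.
static uptr mapped_begin = 0;
static uptr mapped_end = 0;

// Stand-in for MmapFixedSuperNoReserve; the real helper can fail, in which
// case the runtime Die()s.
static bool map_pages(uptr begin, uptr end) {
  std::printf("mapping shadow [0x%llx, 0x%llx)\n",
              (unsigned long long)begin, (unsigned long long)end);
  return true;
}

// Simplified model of the SANITIZER_GO branch of MapShadow: assumes the heap
// (and therefore its shadow) only grows upward, as the patch does.
bool map_shadow_range(uptr begin, uptr end) {
  if (end <= mapped_end)
    return true;             // already covered, no mapping call needed
  if (mapped_begin == 0)
    mapped_begin = begin;    // first heap mapping establishes the lower bound
  if (begin < mapped_end)
    begin = mapped_end;      // skip the prefix that is already mapped
  if (!map_pages(begin, end))
    return false;
  mapped_end = end;
  return true;
}

int main() {
  map_shadow_range(0x010000, 0x030000);  // maps the whole range
  map_shadow_range(0x020000, 0x050000);  // maps only [0x030000, 0x050000)
  map_shadow_range(0x040000, 0x048000);  // fully covered: no mapping call
}

Holding the thread registry lock for the whole of MapShadow is what makes
this bookkeeping safe against a concurrent DoReset reading the same fields.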