diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h
--- a/compiler-rt/lib/tsan/rtl/tsan_platform.h
+++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h
@@ -23,6 +23,19 @@
 namespace __tsan {
 
+enum {
+  // App memory is not mapped onto the shadow memory range.
+  kBrokenMapping = 1 << 0,
+  // Mapping app memory and back does not produce the same address;
+  // this can lead to wrong addresses in reports and potentially
+  // other bad consequences.
+  kBrokenReverseMapping = 1 << 1,
+  // Mapping is non-linear for a linear user range.
+  // This is bad and can lead to unpredictable memory corruption, etc.,
+  // because range access functions assume linearity.
+  kBrokenLinearity = 1 << 2,
+};
+
 /*
 C/C++ on linux/x86_64 and freebsd/x86_64
 0000 0000 1000 - 0080 0000 0000: main binary and/or MAP_32BIT mappings (512GB)
@@ -195,6 +208,7 @@
 3f000 0000 00 - 3ffff ffff ff: modules and main thread stack
 */
 struct MappingAarch64_42 {
+  static const uptr kBroken = kBrokenReverseMapping;
   static const uptr kLoAppMemBeg = 0x00000001000ull;
   static const uptr kLoAppMemEnd = 0x01000000000ull;
   static const uptr kShadowBeg = 0x10000000000ull;
@@ -251,6 +265,8 @@
 0f60 0000 0000 - 1000 0000 0000: modules and main thread stack
 */
 struct MappingPPC64_44 {
+  static const uptr kBroken =
+      kBrokenMapping | kBrokenReverseMapping | kBrokenLinearity;
   static const uptr kMetaShadowBeg = 0x0b0000000000ull;
   static const uptr kMetaShadowEnd = 0x0d0000000000ull;
   static const uptr kTraceMemBeg = 0x0d0000000000ull;
@@ -724,6 +740,27 @@
 #endif
 }
 
+template <typename Func>
+void ForEachMapping() {
+  Func::template Apply<Mapping48AddressSpace>();
+  Func::template Apply<MappingMips64_40>();
+  Func::template Apply<MappingAppleAarch64>();
+  Func::template Apply<MappingAarch64_39>();
+  Func::template Apply<MappingAarch64_42>();
+  Func::template Apply<MappingAarch64_48>();
+  Func::template Apply<MappingPPC64_44>();
+  Func::template Apply<MappingPPC64_46>();
+  Func::template Apply<MappingPPC64_47>();
+  Func::template Apply<MappingS390x>();
+  Func::template Apply<MappingGo48>();
+  Func::template Apply<MappingGoWindows>();
+  Func::template Apply<MappingGoPPC64_46>();
+  Func::template Apply<MappingGoPPC64_47>();
+  Func::template Apply<MappingGoAarch64_48>();
+  Func::template Apply<MappingGoMips64_47>();
+  Func::template Apply<MappingGoS390x>();
+}
+
 enum MappingType {
   MAPPING_LO_APP_BEG,
   MAPPING_LO_APP_END,
@@ -801,33 +838,6 @@
 ALWAYS_INLINE
 uptr VdsoBeg(void) { return SelectMapping(MAPPING_VDSO_BEG); }
 
-static inline
-bool GetUserRegion(int i, uptr *start, uptr *end) {
-  switch (i) {
-    case 0:
-      *start = LoAppMemBeg();
-      *end = LoAppMemEnd();
-      return true;
-    case 1:
-      *start = HiAppMemBeg();
-      *end = HiAppMemEnd();
-      return true;
-    case 2:
-      *start = HeapMemBeg();
-      *end = HeapMemEnd();
-      return true;
-    case 3:
-      if (MidAppMemBeg()) {
-        *start = MidAppMemBeg();
-        *end = MidAppMemEnd();
-        return true;
-      }
-      FALLTHROUGH;
-    default:
-      return false;
-  }
-}
-
 ALWAYS_INLINE
 uptr ShadowBeg(void) { return SelectMapping(MAPPING_SHADOW_BEG); }
 ALWAYS_INLINE
@@ -920,12 +930,13 @@
 struct ShadowToMemImpl {
   template <typename Mapping>
   static uptr Apply(uptr sp) {
-    DCHECK(IsShadowMemImpl::Apply<Mapping>(sp));
-    // The shadow mapping is non-linear and we've lost some bits, so we don't have
-    // an easy way to restore the original app address. But the mapping is a
-    // bijection, so we try to restore the address as belonging to low/mid/high
-    // range consecutively and see if shadow->app->shadow mapping gives us the
-    // same address.
+    if (!IsShadowMemImpl::Apply<Mapping>(sp))
+      return 0;
+    // The shadow mapping is non-linear and we've lost some bits, so we don't
+    // have an easy way to restore the original app address. But the mapping is
+    // a bijection, so we try to restore the address as belonging to
+    // low/mid/high range consecutively and see if shadow->app->shadow mapping
+    // gives us the same address.
     uptr p = ((sp - Mapping::kShadowAdd) / kShadowCnt) ^ Mapping::kShadowXor;
     if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd &&
         MemToShadowImpl::Apply<Mapping>(p) == sp)
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -314,41 +314,6 @@
   }
 }
 
-static void CheckShadowMapping() {
-  uptr beg, end;
-  for (int i = 0; GetUserRegion(i, &beg, &end); i++) {
-    // Skip cases for empty regions (heap definition for architectures that
-    // do not use 64-bit allocator).
-    if (beg == end)
-      continue;
-    VPrintf(3, "checking shadow region %p-%p\n", beg, end);
-    uptr prev = 0;
-    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 4) {
-      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
-        const uptr p = RoundDown(p0 + x, kShadowCell);
-        if (p < beg || p >= end)
-          continue;
-        RawShadow *const s = MemToShadow(p);
-        u32 *const m = MemToMeta(p);
-        VPrintf(3, "  checking pointer %p: shadow=%p meta=%p\n", p, s, m);
-        CHECK(IsAppMem(p));
-        CHECK(IsShadowMem(s));
-        CHECK_EQ(p, ShadowToMem(s));
-        CHECK(IsMetaMem(m));
-        if (prev) {
-          // Ensure that shadow and meta mappings are linear within a single
-          // user range. Lots of code that processes memory ranges assumes it.
-          RawShadow *const prev_s = MemToShadow(prev);
-          u32 *const prev_m = MemToMeta(prev);
-          CHECK_EQ((s - prev_s) * kShadowSize, (p - prev) * kShadowMultiplier);
-          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
-        }
-        prev = p;
-      }
-    }
-  }
-}
-
 #if !SANITIZER_GO
 static void OnStackUnwind(const SignalContext &sig, const void *,
                           BufferedStackTrace *stack) {
@@ -408,7 +373,6 @@
   Processor *proc = ProcCreate();
   ProcWire(proc, thr);
   InitializeInterceptors();
-  CheckShadowMapping();
   InitializePlatform();
   InitializeDynamicAnnotations();
 #if !SANITIZER_GO
diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp
--- a/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp
+++ b/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp
@@ -74,4 +74,68 @@
     CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i]));
 }
 
+// Detect if the Mapping has a kBroken field.
+template <uptr>
+struct Has {
+  typedef bool Result;
+};
+
+template <typename Mapping>
+bool broken(...) {
+  return false;
+}
+
+template <typename Mapping>
+bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
+  return Mapping::kBroken & what;
+}
+
+struct MappingTest {
+  template <typename Mapping>
+  static void Apply() {
+    // Easy (but ugly) way to print the mapping name.
+    Printf("%s\n", __PRETTY_FUNCTION__);
+    TestRegion<Mapping>(Mapping::kLoAppMemBeg, Mapping::kLoAppMemEnd);
+    TestRegion<Mapping>(Mapping::kMidAppMemBeg, Mapping::kMidAppMemEnd);
+    TestRegion<Mapping>(Mapping::kHiAppMemBeg, Mapping::kHiAppMemEnd);
+    TestRegion<Mapping>(Mapping::kHeapMemBeg, Mapping::kHeapMemEnd);
+  }
+
+  template <typename Mapping>
+  static void TestRegion(uptr beg, uptr end) {
+    if (beg == end)
+      return;
+    Printf("checking region [%p-%p)\n", beg, end);
+    uptr prev = 0;
+    for (uptr p0 = beg; p0 <= end; p0 += (end - beg) / 256) {
+      for (int x = -(int)kShadowCell; x <= (int)kShadowCell; x += kShadowCell) {
+        const uptr p = RoundDown(p0 + x, kShadowCell);
+        if (p < beg || p >= end)
+          continue;
+        const uptr s = MemToShadowImpl::Apply<Mapping>(p);
+        u32 *const m = MemToMetaImpl::Apply<Mapping>(p);
+        const uptr r = ShadowToMemImpl::Apply<Mapping>(s);
+        Printf("  addr=%p: shadow=%p meta=%p reverse=%p\n", p, s, m, r);
+        CHECK(IsAppMemImpl::Apply<Mapping>(p));
+        if (!broken<Mapping>(kBrokenMapping))
+          CHECK(IsShadowMemImpl::Apply<Mapping>(s));
+        CHECK(IsMetaMemImpl::Apply<Mapping>(reinterpret_cast<uptr>(m)));
+        if (!broken<Mapping>(kBrokenReverseMapping))
+          CHECK_EQ(p, r);
+        if (prev && !broken<Mapping>(kBrokenLinearity)) {
+          // Ensure that shadow and meta mappings are linear within a single
+          // user range. Lots of code that processes memory ranges assumes it.
+          const uptr prev_s = MemToShadowImpl::Apply<Mapping>(prev);
+          u32 *const prev_m = MemToMetaImpl::Apply<Mapping>(prev);
+          CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier);
+          CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell);
+        }
+        prev = p;
+      }
+    }
+  }
+};
+
+TEST(Shadow, AllMappings) { ForEachMapping<MappingTest>(); }
+
 }  // namespace __tsan
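A note on the ForEachMapping driver added in tsan_platform.h: it uses the "functor type with a template static Apply" pattern, instantiating Func::Apply once per mapping struct. A minimal sketch of that pattern, with made-up mapping names rather than the real TSan ones:

#include <cstdio>

// Toy mapping types; illustrative only, not the real TSan mapping structs.
struct MappingA { static constexpr const char *kName = "MappingA"; };
struct MappingB { static constexpr const char *kName = "MappingB"; };

// Same shape as ForEachMapping(): instantiate the functor's template Apply
// once per mapping type in the list.
template <typename Func>
void ForEach() {
  Func::template Apply<MappingA>();
  Func::template Apply<MappingB>();
}

// A functor in the style of MappingTest: a struct exposing template Apply<M>().
struct PrintName {
  template <typename Mapping>
  static void Apply() {
    std::printf("%s\n", Mapping::kName);
  }
};

int main() { ForEach<PrintName>(); }  // prints MappingA, then MappingB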
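The broken<Mapping>() helper in the test reads Mapping::kBroken only when the mapping declares it: for mappings without the field, the overload taking Has<Mapping::kBroken>::Result fails to instantiate and is dropped from overload resolution (SFINAE), so the variadic fallback returning false is chosen. A self-contained sketch of the same trick, using hypothetical mapping types:

#include <cstdio>

typedef unsigned long uptr;  // stand-in for sanitizer_common's uptr

enum { kBrokenMapping = 1 << 0 };

// Has<V> is only well-formed when V is a valid constant, i.e. when the
// mapping actually declares kBroken.
template <uptr>
struct Has {
  typedef bool Result;
};

// Fallback: matches any call via the ellipsis, reports "not broken".
template <typename Mapping>
bool broken(...) {
  return false;
}

// Preferred overload: only participates in overload resolution when
// Mapping::kBroken exists (otherwise substitution into Has<> fails).
template <typename Mapping>
bool broken(uptr what, typename Has<Mapping::kBroken>::Result = false) {
  return Mapping::kBroken & what;
}

// Hypothetical mappings for the sketch.
struct HealthyMapping {};
struct FlakyMapping {
  static const uptr kBroken = kBrokenMapping;
};

int main() {
  std::printf("healthy: %d\n", (int)broken<HealthyMapping>(kBrokenMapping));  // 0
  std::printf("flaky:   %d\n", (int)broken<FlakyMapping>(kBrokenMapping));    // 1
}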
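What TestRegion checks for each mapping boils down to two properties of the app-to-shadow mapping: ShadowToMem(MemToShadow(p)) == p (otherwise the mapping is flagged kBrokenReverseMapping), and shadow distances equal to app distances scaled by the shadow multiplier within one user range (otherwise kBrokenLinearity). A toy model with made-up constants, not TSan's real layout, that mirrors the xor/multiply/add shape inverted by ShadowToMemImpl:

#include <cassert>
#include <cstdint>
#include <cstdio>

typedef uint64_t uptr;

// Made-up constants for the sketch; the real values live in the Mapping*
// structs in tsan_platform.h.
const uptr kAppBeg = 0x000010000000ull;
const uptr kAppEnd = 0x000018000000ull;
const uptr kXor = 0x0f0000000000ull;  // flips only bits that are constant over the range
const uptr kCnt = 4;                  // plays the role of kShadowMultiplier
const uptr kAdd = 0x100000000000ull;

uptr MemToShadow(uptr p) { return ((p ^ kXor) * kCnt) + kAdd; }
uptr ShadowToMem(uptr s) { return ((s - kAdd) / kCnt) ^ kXor; }

int main() {
  uptr prev = 0;
  // Walk the app range the way TestRegion does and assert the two properties.
  for (uptr p = kAppBeg; p < kAppEnd; p += (kAppEnd - kAppBeg) / 256) {
    const uptr s = MemToShadow(p);
    assert(ShadowToMem(s) == p);  // reverse mapping round-trips
    if (prev)                     // mapping is linear within the range
      assert(s - MemToShadow(prev) == (p - prev) * kCnt);
    prev = p;
  }
  std::printf("round-trip and linearity hold for the toy mapping\n");
}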