diff --git a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_debugging.cpp @@ -197,7 +197,7 @@ if (IsMetaMem(reinterpret_cast<u32 *>(addr))) { region_kind = "meta shadow"; - } else if (IsShadowMem(addr)) { + } else if (IsShadowMem(reinterpret_cast<RawShadow *>(addr))) { region_kind = "shadow"; } else { bool is_stack = false; diff --git a/compiler-rt/lib/tsan/rtl/tsan_defs.h b/compiler-rt/lib/tsan/rtl/tsan_defs.h --- a/compiler-rt/lib/tsan/rtl/tsan_defs.h +++ b/compiler-rt/lib/tsan/rtl/tsan_defs.h @@ -101,8 +101,9 @@ // That many user bytes are mapped onto a single shadow cell. const uptr kShadowCell = 8; -// Size of a single shadow value (u64). -const uptr kShadowSize = 8; +// Single shadow value. +typedef u64 RawShadow; +const uptr kShadowSize = sizeof(RawShadow); // Shadow memory is kShadowMultiplier times larger than user memory. const uptr kShadowMultiplier = kShadowSize * kShadowCnt / kShadowCell; diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp @@ -2200,7 +2200,7 @@ }; static bool IsAppNotRodata(uptr addr) { - return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata; + return IsAppMem(addr) && *MemToShadow(addr) != kShadowRodata; } static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size, diff --git a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_interface_java.cpp @@ -129,14 +129,14 @@ ctx->metamap.MoveMemory(src, dst, size); // Move shadow. 
- u64 *s = (u64*)MemToShadow(src); - u64 *d = (u64*)MemToShadow(dst); - u64 *send = (u64*)MemToShadow(src + size); + RawShadow *s = MemToShadow(src); + RawShadow *d = MemToShadow(dst); + RawShadow *send = MemToShadow(src + size); uptr inc = 1; if (dst > src) { - s = (u64*)MemToShadow(src + size) - 1; - d = (u64*)MemToShadow(dst + size) - 1; - send = (u64*)MemToShadow(src) - 1; + s = MemToShadow(src + size) - 1; + d = MemToShadow(dst + size) - 1; + send = MemToShadow(src) - 1; inc = -1; } for (; s != send; s += inc, d += inc) { diff --git a/compiler-rt/lib/tsan/rtl/tsan_platform.h b/compiler-rt/lib/tsan/rtl/tsan_platform.h --- a/compiler-rt/lib/tsan/rtl/tsan_platform.h +++ b/compiler-rt/lib/tsan/rtl/tsan_platform.h @@ -851,7 +851,8 @@ } ALWAYS_INLINE -bool IsShadowMem(uptr mem) { +bool IsShadowMem(RawShadow *p) { + uptr mem = reinterpret_cast<uptr>(p); #if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO switch (vmaSize) { case 39: return IsShadowMemImpl<Mapping39>(mem); @@ -885,7 +886,6 @@ #endif } - template <typename Mapping> bool IsMetaMemImpl(uptr mem) { return mem >= Mapping::kMetaShadowBeg && mem <= Mapping::kMetaShadowEnd; } @@ -927,8 +927,8 @@ #endif } -template <typename Mapping> -uptr MemToShadowImpl(uptr x) { +template <typename Mapping> +uptr MemToShadowRaw(uptr x) { DCHECK(IsAppMem(x)); #if !SANITIZER_GO return (((x) & ~(Mapping::kAppMemMsk | (kShadowCell - 1))) @@ -942,8 +942,13 @@ #endif } +template <typename Mapping> +RawShadow *MemToShadowImpl(uptr x) { + return reinterpret_cast<RawShadow *>(MemToShadowRaw<Mapping>(x)); +} + ALWAYS_INLINE -uptr MemToShadow(uptr x) { +RawShadow *MemToShadow(uptr x) { #if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO switch (vmaSize) { case 39: return MemToShadowImpl<Mapping39>(x); @@ -951,7 +956,7 @@ case 48: return MemToShadowImpl<Mapping48>(x); } DCHECK(0); - return 0; + return nullptr; #elif defined(__powerpc64__) switch (vmaSize) { #if !SANITIZER_GO @@ -961,7 +966,7 @@ case 47: return MemToShadowImpl<Mapping47>(x); } DCHECK(0); - return 0; + return nullptr; #elif defined(__mips64) switch (vmaSize) { #if !SANITIZER_GO @@ -971,13 
+976,12 @@ #endif } DCHECK(0); - return 0; + return nullptr; #else return MemToShadowImpl<Mapping>(x); #endif } - template <typename Mapping> u32 *MemToMetaImpl(uptr x) { DCHECK(IsAppMem(x)); @@ -1030,39 +1034,39 @@ #endif } - -template <typename Mapping> -uptr ShadowToMemImpl(uptr s) { +template <typename Mapping> +uptr ShadowToMemImpl(RawShadow *s) { DCHECK(IsShadowMem(s)); + uptr sp = reinterpret_cast<uptr>(s); #if !SANITIZER_GO // The shadow mapping is non-linear and we've lost some bits, so we don't have // an easy way to restore the original app address. But the mapping is a // bijection, so we try to restore the address as belonging to low/mid/high // range consecutively and see if shadow->app->shadow mapping gives us the // same address. - uptr p = (s / kShadowCnt) ^ Mapping::kAppMemXor; + uptr p = (sp / kShadowCnt) ^ Mapping::kAppMemXor; if (p >= Mapping::kLoAppMemBeg && p < Mapping::kLoAppMemEnd && MemToShadow(p) == s) return p; # ifdef TSAN_MID_APP_RANGE - p = ((s / kShadowCnt) ^ Mapping::kAppMemXor) + + p = ((sp / kShadowCnt) ^ Mapping::kAppMemXor) + (Mapping::kMidAppMemBeg & Mapping::kAppMemMsk); if (p >= Mapping::kMidAppMemBeg && p < Mapping::kMidAppMemEnd && MemToShadow(p) == s) return p; # endif - return ((s / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk; + return ((sp / kShadowCnt) ^ Mapping::kAppMemXor) | Mapping::kAppMemMsk; #else // #if !SANITIZER_GO # ifndef SANITIZER_WINDOWS - return (s & ~Mapping::kShadowBeg) / kShadowCnt; + return (sp & ~Mapping::kShadowBeg) / kShadowCnt; # else - return (s - Mapping::kShadowBeg) / kShadowCnt; + return (sp - Mapping::kShadowBeg) / kShadowCnt; # endif // SANITIZER_WINDOWS #endif } ALWAYS_INLINE -uptr ShadowToMem(uptr s) { +uptr ShadowToMem(RawShadow *s) { #if defined(__aarch64__) && !defined(__APPLE__) && !SANITIZER_GO switch (vmaSize) { case 39: return ShadowToMemImpl<Mapping39>(s); @@ -1096,8 +1100,6 @@ #endif } - - // The additional page is to catch shadow stack overflow as paging fault. // Windows wants 64K alignment for mmaps. 
const uptr kTotalTraceSize = (kTraceSize * sizeof(Event) + sizeof(Trace) diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.h b/compiler-rt/lib/tsan/rtl/tsan_rtl.h --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.h +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.h @@ -84,7 +84,7 @@ Allocator *allocator(); #endif -const u64 kShadowRodata = (u64)-1; // .rodata shadow marker +const RawShadow kShadowRodata = (RawShadow)-1; // .rodata shadow marker // FastState (from most significant bit): // ignore : 1 @@ -393,8 +393,8 @@ uptr *shadow_stack; uptr *shadow_stack_end; uptr *shadow_stack_pos; - u64 *racy_shadow_addr; - u64 racy_state[2]; + RawShadow *racy_shadow_addr; + RawShadow racy_state[2]; MutexSet mset; ThreadClock clock; #if !SANITIZER_GO diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp @@ -247,7 +247,8 @@ #endif void DontNeedShadowFor(uptr addr, uptr size) { - ReleaseMemoryPagesToOS(MemToShadow(addr), MemToShadow(addr + size)); + ReleaseMemoryPagesToOS(reinterpret_cast<uptr>(MemToShadow(addr)), + reinterpret_cast<uptr>(MemToShadow(addr + size))); } #if !SANITIZER_GO @@ -327,7 +328,7 @@ const uptr p = RoundDown(p0 + x, kShadowCell); if (p < beg || p >= end) continue; - const uptr s = MemToShadow(p); + RawShadow *const s = MemToShadow(p); u32 *const m = MemToMeta(p); VPrintf(3, " checking pointer %p: shadow=%p meta=%p\n", p, s, m); CHECK(IsAppMem(p)); @@ -337,9 +338,9 @@ if (prev) { // Ensure that shadow and meta mappings are linear within a single // user range. Lots of code that processes memory ranges assumes it. 
- const uptr prev_s = MemToShadow(prev); + RawShadow *const prev_s = MemToShadow(prev); u32 *const prev_m = MemToMeta(prev); - CHECK_EQ(s - prev_s, (p - prev) * kShadowMultiplier); + CHECK_EQ((s - prev_s) * kShadowSize, (p - prev) * kShadowMultiplier); CHECK_EQ(m - prev_m, (p - prev) / kMetaShadowCell); } prev = p; @@ -847,7 +848,7 @@ ALWAYS_INLINE USED void MemoryAccess(ThreadState *thr, uptr pc, uptr addr, int kAccessSizeLog, bool kAccessIsWrite, bool kIsAtomic) { - u64 *shadow_mem = (u64*)MemToShadow(addr); + RawShadow *shadow_mem = MemToShadow(addr); DPrintf2("#%d: MemoryAccess: @%p %p size=%d" " is_write=%d shadow_mem=%p {%zx, %zx, %zx, %zx}\n", (int)thr->fast_state.tid(), (void*)pc, (void*)addr, @@ -859,9 +860,9 @@ Printf("Access to non app mem %zx\n", addr); DCHECK(IsAppMem(addr)); } - if (!IsShadowMem((uptr)shadow_mem)) { + if (!IsShadowMem(shadow_mem)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); - DCHECK(IsShadowMem((uptr)shadow_mem)); + DCHECK(IsShadowMem(shadow_mem)); } #endif @@ -936,9 +937,9 @@ size = (size + (kShadowCell - 1)) & ~(kShadowCell - 1); // UnmapOrDie/MmapFixedNoReserve does not work on Windows. if (SANITIZER_WINDOWS || size < common_flags()->clear_shadow_mmap_threshold) { - u64 *p = (u64*)MemToShadow(addr); - CHECK(IsShadowMem((uptr)p)); - CHECK(IsShadowMem((uptr)(p + size * kShadowCnt / kShadowCell - 1))); + RawShadow *p = MemToShadow(addr); + CHECK(IsShadowMem(p)); + CHECK(IsShadowMem(p + size * kShadowCnt / kShadowCell - 1)); // FIXME: may overwrite a part outside the region for (uptr i = 0; i < size / kShadowCell * kShadowCnt;) { p[i++] = val; @@ -948,9 +949,9 @@ } else { // The region is big, reset only beginning and end. 
const uptr kPageSize = GetPageSizeCached(); - u64 *begin = (u64*)MemToShadow(addr); - u64 *end = begin + size / kShadowCell * kShadowCnt; - u64 *p = begin; + RawShadow *begin = MemToShadow(addr); + RawShadow *end = begin + size / kShadowCell * kShadowCnt; + RawShadow *p = begin; // Set at least first kPageSize/2 to page boundary. while ((p < begin + kPageSize / kShadowSize / 2) || ((uptr)p % kPageSize)) { *p++ = val; @@ -958,7 +959,7 @@ *p++ = 0; } // Reset middle part. - u64 *p1 = p; + RawShadow *p1 = p; p = RoundDown(end, kPageSize); if (!MmapFixedSuperNoReserve((uptr)p1, (uptr)p - (uptr)p1)) Die(); diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_report.cpp @@ -614,7 +614,7 @@ thr->racy_state[1] = s.raw(); } - uptr addr = ShadowToMem((uptr)thr->racy_shadow_addr); + uptr addr = ShadowToMem(thr->racy_shadow_addr); uptr addr_min = 0; uptr addr_max = 0; { diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp --- a/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp +++ b/compiler-rt/lib/tsan/rtl/tsan_rtl_thread.cpp @@ -330,7 +330,7 @@ if (size == 0) return; - u64 *shadow_mem = (u64*)MemToShadow(addr); + RawShadow *shadow_mem = MemToShadow(addr); DPrintf2("#%d: MemoryAccessRange: @%p %p size=%d is_write=%d\n", thr->tid, (void*)pc, (void*)addr, (int)size, is_write); @@ -344,14 +344,14 @@ Printf("Access to non app mem %zx\n", addr + size - 1); DCHECK(IsAppMem(addr + size - 1)); } - if (!IsShadowMem((uptr)shadow_mem)) { + if (!IsShadowMem(shadow_mem)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem, addr); - DCHECK(IsShadowMem((uptr)shadow_mem)); + DCHECK(IsShadowMem(shadow_mem)); } - if (!IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))) { + if (!IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)) { Printf("Bad shadow addr %p (%zx)\n", shadow_mem + size * kShadowCnt / 
8 - 1, addr + size - 1); - DCHECK(IsShadowMem((uptr)(shadow_mem + size * kShadowCnt / 8 - 1))); + DCHECK(IsShadowMem(shadow_mem + size * kShadowCnt / 8 - 1)); } #endif diff --git a/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp b/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp --- a/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp +++ b/compiler-rt/lib/tsan/tests/unit/tsan_shadow_test.cpp @@ -63,15 +63,15 @@ TEST(Shadow, Celling) { u64 aligned_data[4]; char *data = (char*)aligned_data; - CHECK_EQ((uptr)data % kShadowSize, 0); - uptr s0 = MemToShadow((uptr)&data[0]); - CHECK_EQ(s0 % kShadowSize, 0); + CHECK(IsAligned(reinterpret_cast<uptr>(data), kShadowSize)); + RawShadow *s0 = MemToShadow((uptr)&data[0]); + CHECK(IsAligned(reinterpret_cast<uptr>(s0), kShadowSize)); for (unsigned i = 1; i < kShadowCell; i++) CHECK_EQ(s0, MemToShadow((uptr)&data[i])); for (unsigned i = kShadowCell; i < 2*kShadowCell; i++) - CHECK_EQ(s0 + kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i])); + CHECK_EQ(s0 + kShadowCnt, MemToShadow((uptr)&data[i])); for (unsigned i = 2*kShadowCell; i < 3*kShadowCell; i++) - CHECK_EQ(s0 + 2*kShadowSize*kShadowCnt, MemToShadow((uptr)&data[i])); + CHECK_EQ(s0 + 2 * kShadowCnt, MemToShadow((uptr)&data[i])); } } // namespace __tsan