Index: compiler-rt/trunk/lib/hwasan/CMakeLists.txt =================================================================== --- compiler-rt/trunk/lib/hwasan/CMakeLists.txt +++ compiler-rt/trunk/lib/hwasan/CMakeLists.txt @@ -2,20 +2,21 @@ # Runtime library sources and build flags. set(HWASAN_RTL_SOURCES - hwasan.cc - hwasan_allocator.cc - hwasan_dynamic_shadow.cc - hwasan_interceptors.cc - hwasan_linux.cc - hwasan_memintrinsics.cc - hwasan_poisoning.cc - hwasan_report.cc - hwasan_thread.cc - hwasan_thread_list.cc + hwasan.cpp + hwasan_allocator.cpp + hwasan_dynamic_shadow.cpp + hwasan_interceptors.cpp + hwasan_linux.cpp + hwasan_memintrinsics.cpp + hwasan_poisoning.cpp + hwasan_report.cpp + hwasan_thread.cpp + hwasan_thread_list.cpp ) set(HWASAN_RTL_CXX_SOURCES - hwasan_new_delete.cc) + hwasan_new_delete.cpp + ) set(HWASAN_RTL_HEADERS hwasan.h Index: compiler-rt/trunk/lib/hwasan/hwasan.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan.cc +++ compiler-rt/trunk/lib/hwasan/hwasan.cc @@ -1,507 +0,0 @@ -//===-- hwasan.cc ---------------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// HWAddressSanitizer runtime. 
-//===----------------------------------------------------------------------===// - -#include "hwasan.h" -#include "hwasan_checks.h" -#include "hwasan_dynamic_shadow.h" -#include "hwasan_poisoning.h" -#include "hwasan_report.h" -#include "hwasan_thread.h" -#include "hwasan_thread_list.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flag_parser.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_procmaps.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_stacktrace.h" -#include "sanitizer_common/sanitizer_symbolizer.h" -#include "ubsan/ubsan_flags.h" -#include "ubsan/ubsan_init.h" - -// ACHTUNG! No system header includes in this file. - -using namespace __sanitizer; - -namespace __hwasan { - -void EnterSymbolizer() { - Thread *t = GetCurrentThread(); - CHECK(t); - t->EnterSymbolizer(); -} -void ExitSymbolizer() { - Thread *t = GetCurrentThread(); - CHECK(t); - t->LeaveSymbolizer(); -} -bool IsInSymbolizer() { - Thread *t = GetCurrentThread(); - return t && t->InSymbolizer(); -} - -static Flags hwasan_flags; - -Flags *flags() { - return &hwasan_flags; -} - -int hwasan_inited = 0; -int hwasan_instrumentation_inited = 0; -bool hwasan_init_is_running; - -int hwasan_report_count = 0; - -void Flags::SetDefaults() { -#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; -#include "hwasan_flags.inc" -#undef HWASAN_FLAG -} - -static void RegisterHwasanFlags(FlagParser *parser, Flags *f) { -#define HWASAN_FLAG(Type, Name, DefaultValue, Description) \ - RegisterFlag(parser, #Name, Description, &f->Name); -#include "hwasan_flags.inc" -#undef HWASAN_FLAG -} - -static void InitializeFlags() { - SetCommonFlagsDefaults(); - { - CommonFlags cf; - cf.CopyFrom(*common_flags()); - cf.external_symbolizer_path = GetEnv("HWASAN_SYMBOLIZER_PATH"); - 
cf.malloc_context_size = 20; - cf.handle_ioctl = true; - // FIXME: test and enable. - cf.check_printf = false; - cf.intercept_tls_get_addr = true; - cf.exitcode = 99; - // 8 shadow pages ~512kB, small enough to cover common stack sizes. - cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8); - // Sigtrap is used in error reporting. - cf.handle_sigtrap = kHandleSignalExclusive; - -#if SANITIZER_ANDROID - // Let platform handle other signals. It is better at reporting them then we - // are. - cf.handle_segv = kHandleSignalNo; - cf.handle_sigbus = kHandleSignalNo; - cf.handle_abort = kHandleSignalNo; - cf.handle_sigill = kHandleSignalNo; - cf.handle_sigfpe = kHandleSignalNo; -#endif - OverrideCommonFlags(cf); - } - - Flags *f = flags(); - f->SetDefaults(); - - FlagParser parser; - RegisterHwasanFlags(&parser, f); - RegisterCommonFlags(&parser); - -#if HWASAN_CONTAINS_UBSAN - __ubsan::Flags *uf = __ubsan::flags(); - uf->SetDefaults(); - - FlagParser ubsan_parser; - __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); - RegisterCommonFlags(&ubsan_parser); -#endif - - // Override from user-specified string. - if (__hwasan_default_options) - parser.ParseString(__hwasan_default_options()); -#if HWASAN_CONTAINS_UBSAN - const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); - ubsan_parser.ParseString(ubsan_default_options); -#endif - - const char *hwasan_options = GetEnv("HWASAN_OPTIONS"); - parser.ParseString(hwasan_options); -#if HWASAN_CONTAINS_UBSAN - ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); -#endif - VPrintf(1, "HWASAN_OPTIONS: %s\n", - hwasan_options ? hwasan_options : ""); - - InitializeCommonFlags(); - - if (Verbosity()) ReportUnrecognizedFlags(); - - if (common_flags()->help) parser.PrintFlagDescriptions(); -} - -void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, - void *context, bool request_fast_unwind) { - Thread *t = GetCurrentThread(); - if (!t) { - // the thread is still being created. 
- stack->size = 0; - return; - } - if (!StackTrace::WillUseFastUnwind(request_fast_unwind)) { - // Block reports from our interceptors during _Unwind_Backtrace. - SymbolizerScope sym_scope; - return stack->Unwind(max_s, pc, bp, context, 0, 0, request_fast_unwind); - } - stack->Unwind(max_s, pc, bp, context, t->stack_top(), t->stack_bottom(), - request_fast_unwind); -} - -static void HWAsanCheckFailed(const char *file, int line, const char *cond, - u64 v1, u64 v2) { - Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, - line, cond, (uptr)v1, (uptr)v2); - PRINT_CURRENT_STACK_CHECK(); - Die(); -} - -static constexpr uptr kMemoryUsageBufferSize = 4096; - -static void HwasanFormatMemoryUsage(InternalScopedString &s) { - HwasanThreadList &thread_list = hwasanThreadList(); - auto thread_stats = thread_list.GetThreadStats(); - auto *sds = StackDepotGetStats(); - AllocatorStatCounters asc; - GetAllocatorStats(asc); - s.append( - "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd" - " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd" - " heap: %zd", - internal_getpid(), GetRSS(), thread_stats.n_live_threads, - thread_stats.total_stack_size, - thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(), - sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]); -} - -#if SANITIZER_ANDROID -static char *memory_usage_buffer = nullptr; - -static void InitMemoryUsage() { - memory_usage_buffer = - (char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string"); - CHECK(memory_usage_buffer); - memory_usage_buffer[0] = '\0'; - DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize, - memory_usage_buffer); -} - -void UpdateMemoryUsage() { - if (!flags()->export_memory_stats) - return; - if (!memory_usage_buffer) - InitMemoryUsage(); - InternalScopedString s(kMemoryUsageBufferSize); - HwasanFormatMemoryUsage(s); - internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1); - memory_usage_buffer[kMemoryUsageBufferSize - 1] = 
'\0'; -} -#else -void UpdateMemoryUsage() {} -#endif - -struct FrameDescription { - uptr PC; - const char *Descr; -}; - -struct FrameDescriptionArray { - FrameDescription *beg, *end; -}; - -static InternalMmapVectorNoCtor AllFrames; - -void InitFrameDescriptors(uptr b, uptr e) { - FrameDescription *beg = reinterpret_cast(b); - FrameDescription *end = reinterpret_cast(e); - if (beg == end) - return; - AllFrames.push_back({beg, end}); - if (Verbosity()) - for (FrameDescription *frame_descr = beg; frame_descr < end; frame_descr++) - Printf("Frame: %p %s\n", frame_descr->PC, frame_descr->Descr); -} - -const char *GetStackFrameDescr(uptr pc) { - for (uptr i = 0, n = AllFrames.size(); i < n; i++) - for (auto p = AllFrames[i].beg; p < AllFrames[i].end; p++) - if (p->PC == pc) - return p->Descr; - return nullptr; -} - -// Prepare to run instrumented code on the main thread. -void InitInstrumentation() { - if (hwasan_instrumentation_inited) return; - - if (!InitShadow()) { - Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n"); - DumpProcessMap(); - Die(); - } - - InitThreads(); - hwasanThreadList().CreateCurrentThread(); - - hwasan_instrumentation_inited = 1; -} - -} // namespace __hwasan - -// Interface. - -using namespace __hwasan; - -uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol. - -void __hwasan_init_frames(uptr beg, uptr end) { - InitFrameDescriptors(beg, end); -} - -void __hwasan_init_static() { - InitShadowGOT(); - InitInstrumentation(); -} - -void __hwasan_init() { - CHECK(!hwasan_init_is_running); - if (hwasan_inited) return; - hwasan_init_is_running = 1; - SanitizerToolName = "HWAddressSanitizer"; - - InitTlsSize(); - - CacheBinaryName(); - InitializeFlags(); - - // Install tool-specific callbacks in sanitizer_common. 
- SetCheckFailedCallback(HWAsanCheckFailed); - - __sanitizer_set_report_path(common_flags()->log_path); - - AndroidTestTlsSlot(); - - DisableCoreDumperIfNecessary(); - - InitInstrumentation(); - - // Needs to be called here because flags()->random_tags might not have been - // initialized when InitInstrumentation() was called. - GetCurrentThread()->InitRandomState(); - - MadviseShadow(); - - SetPrintfAndReportCallback(AppendToErrorMessageBuffer); - // This may call libc -> needs initialized shadow. - AndroidLogInit(); - - InitializeInterceptors(); - InstallDeadlySignalHandlers(HwasanOnDeadlySignal); - InstallAtExitHandler(); // Needs __cxa_atexit interceptor. - - Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); - - InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); - - HwasanTSDInit(); - HwasanTSDThreadInit(); - - HwasanAllocatorInit(); - -#if HWASAN_CONTAINS_UBSAN - __ubsan::InitAsPlugin(); -#endif - - VPrintf(1, "HWAddressSanitizer init done\n"); - - hwasan_init_is_running = 0; - hwasan_inited = 1; -} - -void __hwasan_print_shadow(const void *p, uptr sz) { - uptr ptr_raw = UntagAddr(reinterpret_cast(p)); - uptr shadow_first = MemToShadow(ptr_raw); - uptr shadow_last = MemToShadow(ptr_raw + sz - 1); - Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw, - ptr_raw + sz, GetTagFromPointer((uptr)p)); - for (uptr s = shadow_first; s <= shadow_last; ++s) - Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s); -} - -sptr __hwasan_test_shadow(const void *p, uptr sz) { - if (sz == 0) - return -1; - tag_t ptr_tag = GetTagFromPointer((uptr)p); - uptr ptr_raw = UntagAddr(reinterpret_cast(p)); - uptr shadow_first = MemToShadow(ptr_raw); - uptr shadow_last = MemToShadow(ptr_raw + sz - 1); - for (uptr s = shadow_first; s <= shadow_last; ++s) - if (*(tag_t *)s != ptr_tag) { - sptr offset = ShadowToMem(s) - ptr_raw; - return offset < 0 ? 
0 : offset; - } - return -1; -} - -u16 __sanitizer_unaligned_load16(const uu16 *p) { - return *p; -} -u32 __sanitizer_unaligned_load32(const uu32 *p) { - return *p; -} -u64 __sanitizer_unaligned_load64(const uu64 *p) { - return *p; -} -void __sanitizer_unaligned_store16(uu16 *p, u16 x) { - *p = x; -} -void __sanitizer_unaligned_store32(uu32 *p, u32 x) { - *p = x; -} -void __sanitizer_unaligned_store64(uu64 *p, u64 x) { - *p = x; -} - -void __hwasan_loadN(uptr p, uptr sz) { - CheckAddressSized(p, sz); -} -void __hwasan_load1(uptr p) { - CheckAddress(p); -} -void __hwasan_load2(uptr p) { - CheckAddress(p); -} -void __hwasan_load4(uptr p) { - CheckAddress(p); -} -void __hwasan_load8(uptr p) { - CheckAddress(p); -} -void __hwasan_load16(uptr p) { - CheckAddress(p); -} - -void __hwasan_loadN_noabort(uptr p, uptr sz) { - CheckAddressSized(p, sz); -} -void __hwasan_load1_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_load2_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_load4_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_load8_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_load16_noabort(uptr p) { - CheckAddress(p); -} - -void __hwasan_storeN(uptr p, uptr sz) { - CheckAddressSized(p, sz); -} -void __hwasan_store1(uptr p) { - CheckAddress(p); -} -void __hwasan_store2(uptr p) { - CheckAddress(p); -} -void __hwasan_store4(uptr p) { - CheckAddress(p); -} -void __hwasan_store8(uptr p) { - CheckAddress(p); -} -void __hwasan_store16(uptr p) { - CheckAddress(p); -} - -void __hwasan_storeN_noabort(uptr p, uptr sz) { - CheckAddressSized(p, sz); -} -void __hwasan_store1_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_store2_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_store4_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_store8_noabort(uptr p) { - CheckAddress(p); -} -void __hwasan_store16_noabort(uptr p) { - CheckAddress(p); -} - -void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) { - TagMemoryAligned(p, sz, tag); -} - -uptr 
__hwasan_tag_pointer(uptr p, u8 tag) { - return AddTagToPointer(p, tag); -} - -void __hwasan_handle_longjmp(const void *sp_dst) { - uptr dst = (uptr)sp_dst; - // HWASan does not support tagged SP. - CHECK(GetTagFromPointer(dst) == 0); - - uptr sp = (uptr)__builtin_frame_address(0); - static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M - if (dst < sp || dst - sp > kMaxExpectedCleanupSize) { - Report( - "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: " - "stack top: %p; target %p; distance: %p (%zd)\n" - "False positive error reports may follow\n", - (void *)sp, (void *)dst, dst - sp); - return; - } - TagMemory(sp, dst - sp, 0); -} - -void __hwasan_print_memory_usage() { - InternalScopedString s(kMemoryUsageBufferSize); - HwasanFormatMemoryUsage(s); - Printf("%s\n", s.data()); -} - -static const u8 kFallbackTag = 0xBB; - -u8 __hwasan_generate_tag() { - Thread *t = GetCurrentThread(); - if (!t) return kFallbackTag; - return t->GenerateRandomTag(); -} - -#if !SANITIZER_SUPPORTS_WEAK_HOOKS -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE -const char* __hwasan_default_options() { return ""; } -} // extern "C" -#endif - -extern "C" { -SANITIZER_INTERFACE_ATTRIBUTE -void __sanitizer_print_stack_trace() { - GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()); - stack.Print(); -} -} // extern "C" Index: compiler-rt/trunk/lib/hwasan/hwasan.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan.cpp @@ -0,0 +1,507 @@ +//===-- hwasan.cpp --------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// HWAddressSanitizer runtime. +//===----------------------------------------------------------------------===// + +#include "hwasan.h" +#include "hwasan_checks.h" +#include "hwasan_dynamic_shadow.h" +#include "hwasan_poisoning.h" +#include "hwasan_report.h" +#include "hwasan_thread.h" +#include "hwasan_thread_list.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flag_parser.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_procmaps.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace.h" +#include "sanitizer_common/sanitizer_symbolizer.h" +#include "ubsan/ubsan_flags.h" +#include "ubsan/ubsan_init.h" + +// ACHTUNG! No system header includes in this file. 
+ +using namespace __sanitizer; + +namespace __hwasan { + +void EnterSymbolizer() { + Thread *t = GetCurrentThread(); + CHECK(t); + t->EnterSymbolizer(); +} +void ExitSymbolizer() { + Thread *t = GetCurrentThread(); + CHECK(t); + t->LeaveSymbolizer(); +} +bool IsInSymbolizer() { + Thread *t = GetCurrentThread(); + return t && t->InSymbolizer(); +} + +static Flags hwasan_flags; + +Flags *flags() { + return &hwasan_flags; +} + +int hwasan_inited = 0; +int hwasan_instrumentation_inited = 0; +bool hwasan_init_is_running; + +int hwasan_report_count = 0; + +void Flags::SetDefaults() { +#define HWASAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue; +#include "hwasan_flags.inc" +#undef HWASAN_FLAG +} + +static void RegisterHwasanFlags(FlagParser *parser, Flags *f) { +#define HWASAN_FLAG(Type, Name, DefaultValue, Description) \ + RegisterFlag(parser, #Name, Description, &f->Name); +#include "hwasan_flags.inc" +#undef HWASAN_FLAG +} + +static void InitializeFlags() { + SetCommonFlagsDefaults(); + { + CommonFlags cf; + cf.CopyFrom(*common_flags()); + cf.external_symbolizer_path = GetEnv("HWASAN_SYMBOLIZER_PATH"); + cf.malloc_context_size = 20; + cf.handle_ioctl = true; + // FIXME: test and enable. + cf.check_printf = false; + cf.intercept_tls_get_addr = true; + cf.exitcode = 99; + // 8 shadow pages ~512kB, small enough to cover common stack sizes. + cf.clear_shadow_mmap_threshold = 4096 * (SANITIZER_ANDROID ? 2 : 8); + // Sigtrap is used in error reporting. + cf.handle_sigtrap = kHandleSignalExclusive; + +#if SANITIZER_ANDROID + // Let platform handle other signals. It is better at reporting them then we + // are. 
+ cf.handle_segv = kHandleSignalNo; + cf.handle_sigbus = kHandleSignalNo; + cf.handle_abort = kHandleSignalNo; + cf.handle_sigill = kHandleSignalNo; + cf.handle_sigfpe = kHandleSignalNo; +#endif + OverrideCommonFlags(cf); + } + + Flags *f = flags(); + f->SetDefaults(); + + FlagParser parser; + RegisterHwasanFlags(&parser, f); + RegisterCommonFlags(&parser); + +#if HWASAN_CONTAINS_UBSAN + __ubsan::Flags *uf = __ubsan::flags(); + uf->SetDefaults(); + + FlagParser ubsan_parser; + __ubsan::RegisterUbsanFlags(&ubsan_parser, uf); + RegisterCommonFlags(&ubsan_parser); +#endif + + // Override from user-specified string. + if (__hwasan_default_options) + parser.ParseString(__hwasan_default_options()); +#if HWASAN_CONTAINS_UBSAN + const char *ubsan_default_options = __ubsan::MaybeCallUbsanDefaultOptions(); + ubsan_parser.ParseString(ubsan_default_options); +#endif + + const char *hwasan_options = GetEnv("HWASAN_OPTIONS"); + parser.ParseString(hwasan_options); +#if HWASAN_CONTAINS_UBSAN + ubsan_parser.ParseString(GetEnv("UBSAN_OPTIONS")); +#endif + VPrintf(1, "HWASAN_OPTIONS: %s\n", + hwasan_options ? hwasan_options : ""); + + InitializeCommonFlags(); + + if (Verbosity()) ReportUnrecognizedFlags(); + + if (common_flags()->help) parser.PrintFlagDescriptions(); +} + +void GetStackTrace(BufferedStackTrace *stack, uptr max_s, uptr pc, uptr bp, + void *context, bool request_fast_unwind) { + Thread *t = GetCurrentThread(); + if (!t) { + // the thread is still being created. + stack->size = 0; + return; + } + if (!StackTrace::WillUseFastUnwind(request_fast_unwind)) { + // Block reports from our interceptors during _Unwind_Backtrace. 
+ SymbolizerScope sym_scope; + return stack->Unwind(max_s, pc, bp, context, 0, 0, request_fast_unwind); + } + stack->Unwind(max_s, pc, bp, context, t->stack_top(), t->stack_bottom(), + request_fast_unwind); +} + +static void HWAsanCheckFailed(const char *file, int line, const char *cond, + u64 v1, u64 v2) { + Report("HWAddressSanitizer CHECK failed: %s:%d \"%s\" (0x%zx, 0x%zx)\n", file, + line, cond, (uptr)v1, (uptr)v2); + PRINT_CURRENT_STACK_CHECK(); + Die(); +} + +static constexpr uptr kMemoryUsageBufferSize = 4096; + +static void HwasanFormatMemoryUsage(InternalScopedString &s) { + HwasanThreadList &thread_list = hwasanThreadList(); + auto thread_stats = thread_list.GetThreadStats(); + auto *sds = StackDepotGetStats(); + AllocatorStatCounters asc; + GetAllocatorStats(asc); + s.append( + "HWASAN pid: %d rss: %zd threads: %zd stacks: %zd" + " thr_aux: %zd stack_depot: %zd uniq_stacks: %zd" + " heap: %zd", + internal_getpid(), GetRSS(), thread_stats.n_live_threads, + thread_stats.total_stack_size, + thread_stats.n_live_threads * thread_list.MemoryUsedPerThread(), + sds->allocated, sds->n_uniq_ids, asc[AllocatorStatMapped]); +} + +#if SANITIZER_ANDROID +static char *memory_usage_buffer = nullptr; + +static void InitMemoryUsage() { + memory_usage_buffer = + (char *)MmapOrDie(kMemoryUsageBufferSize, "memory usage string"); + CHECK(memory_usage_buffer); + memory_usage_buffer[0] = '\0'; + DecorateMapping((uptr)memory_usage_buffer, kMemoryUsageBufferSize, + memory_usage_buffer); +} + +void UpdateMemoryUsage() { + if (!flags()->export_memory_stats) + return; + if (!memory_usage_buffer) + InitMemoryUsage(); + InternalScopedString s(kMemoryUsageBufferSize); + HwasanFormatMemoryUsage(s); + internal_strncpy(memory_usage_buffer, s.data(), kMemoryUsageBufferSize - 1); + memory_usage_buffer[kMemoryUsageBufferSize - 1] = '\0'; +} +#else +void UpdateMemoryUsage() {} +#endif + +struct FrameDescription { + uptr PC; + const char *Descr; +}; + +struct FrameDescriptionArray { + 
FrameDescription *beg, *end; +}; + +static InternalMmapVectorNoCtor AllFrames; + +void InitFrameDescriptors(uptr b, uptr e) { + FrameDescription *beg = reinterpret_cast(b); + FrameDescription *end = reinterpret_cast(e); + if (beg == end) + return; + AllFrames.push_back({beg, end}); + if (Verbosity()) + for (FrameDescription *frame_descr = beg; frame_descr < end; frame_descr++) + Printf("Frame: %p %s\n", frame_descr->PC, frame_descr->Descr); +} + +const char *GetStackFrameDescr(uptr pc) { + for (uptr i = 0, n = AllFrames.size(); i < n; i++) + for (auto p = AllFrames[i].beg; p < AllFrames[i].end; p++) + if (p->PC == pc) + return p->Descr; + return nullptr; +} + +// Prepare to run instrumented code on the main thread. +void InitInstrumentation() { + if (hwasan_instrumentation_inited) return; + + if (!InitShadow()) { + Printf("FATAL: HWAddressSanitizer cannot mmap the shadow memory.\n"); + DumpProcessMap(); + Die(); + } + + InitThreads(); + hwasanThreadList().CreateCurrentThread(); + + hwasan_instrumentation_inited = 1; +} + +} // namespace __hwasan + +// Interface. + +using namespace __hwasan; + +uptr __hwasan_shadow_memory_dynamic_address; // Global interface symbol. + +void __hwasan_init_frames(uptr beg, uptr end) { + InitFrameDescriptors(beg, end); +} + +void __hwasan_init_static() { + InitShadowGOT(); + InitInstrumentation(); +} + +void __hwasan_init() { + CHECK(!hwasan_init_is_running); + if (hwasan_inited) return; + hwasan_init_is_running = 1; + SanitizerToolName = "HWAddressSanitizer"; + + InitTlsSize(); + + CacheBinaryName(); + InitializeFlags(); + + // Install tool-specific callbacks in sanitizer_common. + SetCheckFailedCallback(HWAsanCheckFailed); + + __sanitizer_set_report_path(common_flags()->log_path); + + AndroidTestTlsSlot(); + + DisableCoreDumperIfNecessary(); + + InitInstrumentation(); + + // Needs to be called here because flags()->random_tags might not have been + // initialized when InitInstrumentation() was called. 
+ GetCurrentThread()->InitRandomState(); + + MadviseShadow(); + + SetPrintfAndReportCallback(AppendToErrorMessageBuffer); + // This may call libc -> needs initialized shadow. + AndroidLogInit(); + + InitializeInterceptors(); + InstallDeadlySignalHandlers(HwasanOnDeadlySignal); + InstallAtExitHandler(); // Needs __cxa_atexit interceptor. + + Symbolizer::GetOrInit()->AddHooks(EnterSymbolizer, ExitSymbolizer); + + InitializeCoverage(common_flags()->coverage, common_flags()->coverage_dir); + + HwasanTSDInit(); + HwasanTSDThreadInit(); + + HwasanAllocatorInit(); + +#if HWASAN_CONTAINS_UBSAN + __ubsan::InitAsPlugin(); +#endif + + VPrintf(1, "HWAddressSanitizer init done\n"); + + hwasan_init_is_running = 0; + hwasan_inited = 1; +} + +void __hwasan_print_shadow(const void *p, uptr sz) { + uptr ptr_raw = UntagAddr(reinterpret_cast(p)); + uptr shadow_first = MemToShadow(ptr_raw); + uptr shadow_last = MemToShadow(ptr_raw + sz - 1); + Printf("HWASan shadow map for %zx .. %zx (pointer tag %x)\n", ptr_raw, + ptr_raw + sz, GetTagFromPointer((uptr)p)); + for (uptr s = shadow_first; s <= shadow_last; ++s) + Printf(" %zx: %x\n", ShadowToMem(s), *(tag_t *)s); +} + +sptr __hwasan_test_shadow(const void *p, uptr sz) { + if (sz == 0) + return -1; + tag_t ptr_tag = GetTagFromPointer((uptr)p); + uptr ptr_raw = UntagAddr(reinterpret_cast(p)); + uptr shadow_first = MemToShadow(ptr_raw); + uptr shadow_last = MemToShadow(ptr_raw + sz - 1); + for (uptr s = shadow_first; s <= shadow_last; ++s) + if (*(tag_t *)s != ptr_tag) { + sptr offset = ShadowToMem(s) - ptr_raw; + return offset < 0 ? 
0 : offset; + } + return -1; +} + +u16 __sanitizer_unaligned_load16(const uu16 *p) { + return *p; +} +u32 __sanitizer_unaligned_load32(const uu32 *p) { + return *p; +} +u64 __sanitizer_unaligned_load64(const uu64 *p) { + return *p; +} +void __sanitizer_unaligned_store16(uu16 *p, u16 x) { + *p = x; +} +void __sanitizer_unaligned_store32(uu32 *p, u32 x) { + *p = x; +} +void __sanitizer_unaligned_store64(uu64 *p, u64 x) { + *p = x; +} + +void __hwasan_loadN(uptr p, uptr sz) { + CheckAddressSized(p, sz); +} +void __hwasan_load1(uptr p) { + CheckAddress(p); +} +void __hwasan_load2(uptr p) { + CheckAddress(p); +} +void __hwasan_load4(uptr p) { + CheckAddress(p); +} +void __hwasan_load8(uptr p) { + CheckAddress(p); +} +void __hwasan_load16(uptr p) { + CheckAddress(p); +} + +void __hwasan_loadN_noabort(uptr p, uptr sz) { + CheckAddressSized(p, sz); +} +void __hwasan_load1_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_load2_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_load4_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_load8_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_load16_noabort(uptr p) { + CheckAddress(p); +} + +void __hwasan_storeN(uptr p, uptr sz) { + CheckAddressSized(p, sz); +} +void __hwasan_store1(uptr p) { + CheckAddress(p); +} +void __hwasan_store2(uptr p) { + CheckAddress(p); +} +void __hwasan_store4(uptr p) { + CheckAddress(p); +} +void __hwasan_store8(uptr p) { + CheckAddress(p); +} +void __hwasan_store16(uptr p) { + CheckAddress(p); +} + +void __hwasan_storeN_noabort(uptr p, uptr sz) { + CheckAddressSized(p, sz); +} +void __hwasan_store1_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_store2_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_store4_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_store8_noabort(uptr p) { + CheckAddress(p); +} +void __hwasan_store16_noabort(uptr p) { + CheckAddress(p); +} + +void __hwasan_tag_memory(uptr p, u8 tag, uptr sz) { + TagMemoryAligned(p, sz, tag); +} + +uptr 
__hwasan_tag_pointer(uptr p, u8 tag) { + return AddTagToPointer(p, tag); +} + +void __hwasan_handle_longjmp(const void *sp_dst) { + uptr dst = (uptr)sp_dst; + // HWASan does not support tagged SP. + CHECK(GetTagFromPointer(dst) == 0); + + uptr sp = (uptr)__builtin_frame_address(0); + static const uptr kMaxExpectedCleanupSize = 64 << 20; // 64M + if (dst < sp || dst - sp > kMaxExpectedCleanupSize) { + Report( + "WARNING: HWASan is ignoring requested __hwasan_handle_longjmp: " + "stack top: %p; target %p; distance: %p (%zd)\n" + "False positive error reports may follow\n", + (void *)sp, (void *)dst, dst - sp); + return; + } + TagMemory(sp, dst - sp, 0); +} + +void __hwasan_print_memory_usage() { + InternalScopedString s(kMemoryUsageBufferSize); + HwasanFormatMemoryUsage(s); + Printf("%s\n", s.data()); +} + +static const u8 kFallbackTag = 0xBB; + +u8 __hwasan_generate_tag() { + Thread *t = GetCurrentThread(); + if (!t) return kFallbackTag; + return t->GenerateRandomTag(); +} + +#if !SANITIZER_SUPPORTS_WEAK_HOOKS +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE +const char* __hwasan_default_options() { return ""; } +} // extern "C" +#endif + +extern "C" { +SANITIZER_INTERFACE_ATTRIBUTE +void __sanitizer_print_stack_trace() { + GET_FATAL_STACK_TRACE_PC_BP(StackTrace::GetCurrentPc(), GET_CURRENT_FRAME()); + stack.Print(); +} +} // extern "C" Index: compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_allocator.cc @@ -1,450 +0,0 @@ -//===-- hwasan_allocator.cc ------------------------- ---------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. 
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// HWAddressSanitizer allocator. -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_errno.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "hwasan.h" -#include "hwasan_allocator.h" -#include "hwasan_mapping.h" -#include "hwasan_malloc_bisect.h" -#include "hwasan_thread.h" -#include "hwasan_report.h" - -#if HWASAN_WITH_INTERCEPTORS -DEFINE_REAL(void *, realloc, void *ptr, uptr size) -DEFINE_REAL(void, free, void *ptr) -#endif - -namespace __hwasan { - -static Allocator allocator; -static AllocatorCache fallback_allocator_cache; -static SpinMutex fallback_mutex; -static atomic_uint8_t hwasan_allocator_tagging_enabled; - -static const tag_t kFallbackAllocTag = 0xBB; -static const tag_t kFallbackFreeTag = 0xBC; - -enum RightAlignMode { - kRightAlignNever, - kRightAlignSometimes, - kRightAlignAlways -}; - -// These two variables are initialized from flags()->malloc_align_right -// in HwasanAllocatorInit and are never changed afterwards. -static RightAlignMode right_align_mode = kRightAlignNever; -static bool right_align_8 = false; - -// Initialized in HwasanAllocatorInit, an never changed. -static ALIGNED(16) u8 tail_magic[kShadowAlignment]; - -bool HwasanChunkView::IsAllocated() const { - return metadata_ && metadata_->alloc_context_id && metadata_->requested_size; -} - -// Aligns the 'addr' right to the granule boundary. -static uptr AlignRight(uptr addr, uptr requested_size) { - uptr tail_size = requested_size % kShadowAlignment; - if (!tail_size) return addr; - if (right_align_8) - return tail_size > 8 ? 
addr : addr + 8; - return addr + kShadowAlignment - tail_size; -} - -uptr HwasanChunkView::Beg() const { - if (metadata_ && metadata_->right_aligned) - return AlignRight(block_, metadata_->requested_size); - return block_; -} -uptr HwasanChunkView::End() const { - return Beg() + UsedSize(); -} -uptr HwasanChunkView::UsedSize() const { - return metadata_->requested_size; -} -u32 HwasanChunkView::GetAllocStackId() const { - return metadata_->alloc_context_id; -} - -uptr HwasanChunkView::ActualSize() const { - return allocator.GetActuallyAllocatedSize(reinterpret_cast(block_)); -} - -bool HwasanChunkView::FromSmallHeap() const { - return allocator.FromPrimary(reinterpret_cast(block_)); -} - -void GetAllocatorStats(AllocatorStatCounters s) { - allocator.GetStats(s); -} - -void HwasanAllocatorInit() { - atomic_store_relaxed(&hwasan_allocator_tagging_enabled, - !flags()->disable_allocator_tagging); - SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); - allocator.Init(common_flags()->allocator_release_to_os_interval_ms); - switch (flags()->malloc_align_right) { - case 0: break; - case 1: - right_align_mode = kRightAlignSometimes; - right_align_8 = false; - break; - case 2: - right_align_mode = kRightAlignAlways; - right_align_8 = false; - break; - case 8: - right_align_mode = kRightAlignSometimes; - right_align_8 = true; - break; - case 9: - right_align_mode = kRightAlignAlways; - right_align_8 = true; - break; - default: - Report("ERROR: unsupported value of malloc_align_right flag: %d\n", - flags()->malloc_align_right); - Die(); - } - for (uptr i = 0; i < kShadowAlignment; i++) - tail_magic[i] = GetCurrentThread()->GenerateRandomTag(); -} - -void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) { - allocator.SwallowCache(cache); -} - -static uptr TaggedSize(uptr size) { - if (!size) size = 1; - uptr new_size = RoundUpTo(size, kShadowAlignment); - CHECK_GE(new_size, size); - return new_size; -} - -static void *HwasanAllocate(StackTrace *stack, 
uptr orig_size, uptr alignment, - bool zeroise) { - if (orig_size > kMaxAllowedMallocSize) { - if (AllocatorMayReturnNull()) { - Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n", - orig_size); - return nullptr; - } - ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack); - } - - alignment = Max(alignment, kShadowAlignment); - uptr size = TaggedSize(orig_size); - Thread *t = GetCurrentThread(); - void *allocated; - if (t) { - allocated = allocator.Allocate(t->allocator_cache(), size, alignment); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; - allocated = allocator.Allocate(cache, size, alignment); - } - if (UNLIKELY(!allocated)) { - SetAllocatorOutOfMemory(); - if (AllocatorMayReturnNull()) - return nullptr; - ReportOutOfMemory(size, stack); - } - Metadata *meta = - reinterpret_cast(allocator.GetMetaData(allocated)); - meta->requested_size = static_cast(orig_size); - meta->alloc_context_id = StackDepotPut(*stack); - meta->right_aligned = false; - if (zeroise) { - internal_memset(allocated, 0, size); - } else if (flags()->max_malloc_fill_size > 0) { - uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size); - internal_memset(allocated, flags()->malloc_fill_byte, fill_size); - } - if (!right_align_mode) - internal_memcpy(reinterpret_cast(allocated) + orig_size, tail_magic, - size - orig_size); - - void *user_ptr = allocated; - // Tagging can only be skipped when both tag_in_malloc and tag_in_free are - // false. When tag_in_malloc = false and tag_in_free = true malloc needs to - // retag to 0. - if ((flags()->tag_in_malloc || flags()->tag_in_free) && - atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) { - tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size) - ? (t ? 
t->GenerateRandomTag() : kFallbackAllocTag) - : 0; - user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag); - } - - if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) && - right_align_mode) { - uptr as_uptr = reinterpret_cast(user_ptr); - if (right_align_mode == kRightAlignAlways || - GetTagFromPointer(as_uptr) & 1) { // use a tag bit as a random bit. - user_ptr = reinterpret_cast(AlignRight(as_uptr, orig_size)); - meta->right_aligned = 1; - } - } - - HWASAN_MALLOC_HOOK(user_ptr, size); - return user_ptr; -} - -static bool PointerAndMemoryTagsMatch(void *tagged_ptr) { - CHECK(tagged_ptr); - tag_t ptr_tag = GetTagFromPointer(reinterpret_cast(tagged_ptr)); - tag_t mem_tag = *reinterpret_cast( - MemToShadow(reinterpret_cast(UntagPtr(tagged_ptr)))); - return ptr_tag == mem_tag; -} - -static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) { - CHECK(tagged_ptr); - HWASAN_FREE_HOOK(tagged_ptr); - - if (!PointerAndMemoryTagsMatch(tagged_ptr)) - ReportInvalidFree(stack, reinterpret_cast(tagged_ptr)); - - void *untagged_ptr = UntagPtr(tagged_ptr); - void *aligned_ptr = reinterpret_cast( - RoundDownTo(reinterpret_cast(untagged_ptr), kShadowAlignment)); - Metadata *meta = - reinterpret_cast(allocator.GetMetaData(aligned_ptr)); - uptr orig_size = meta->requested_size; - u32 free_context_id = StackDepotPut(*stack); - u32 alloc_context_id = meta->alloc_context_id; - - // Check tail magic. 
- uptr tagged_size = TaggedSize(orig_size); - if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) { - uptr tail_size = tagged_size - orig_size; - CHECK_LT(tail_size, kShadowAlignment); - void *tail_beg = reinterpret_cast( - reinterpret_cast(aligned_ptr) + orig_size); - if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size)) - ReportTailOverwritten(stack, reinterpret_cast(tagged_ptr), - orig_size, tail_size, tail_magic); - } - - meta->requested_size = 0; - meta->alloc_context_id = 0; - // This memory will not be reused by anyone else, so we are free to keep it - // poisoned. - Thread *t = GetCurrentThread(); - if (flags()->max_free_fill_size > 0) { - uptr fill_size = - Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size); - internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size); - } - if (flags()->tag_in_free && malloc_bisect(stack, 0) && - atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) - TagMemoryAligned(reinterpret_cast(aligned_ptr), TaggedSize(orig_size), - t ? 
t->GenerateRandomTag() : kFallbackFreeTag); - if (t) { - allocator.Deallocate(t->allocator_cache(), aligned_ptr); - if (auto *ha = t->heap_allocations()) - ha->push({reinterpret_cast(tagged_ptr), alloc_context_id, - free_context_id, static_cast(orig_size)}); - } else { - SpinMutexLock l(&fallback_mutex); - AllocatorCache *cache = &fallback_allocator_cache; - allocator.Deallocate(cache, aligned_ptr); - } -} - -static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old, - uptr new_size, uptr alignment) { - if (!PointerAndMemoryTagsMatch(tagged_ptr_old)) - ReportInvalidFree(stack, reinterpret_cast(tagged_ptr_old)); - - void *tagged_ptr_new = - HwasanAllocate(stack, new_size, alignment, false /*zeroise*/); - if (tagged_ptr_old && tagged_ptr_new) { - void *untagged_ptr_old = UntagPtr(tagged_ptr_old); - Metadata *meta = - reinterpret_cast(allocator.GetMetaData(untagged_ptr_old)); - internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old, - Min(new_size, static_cast(meta->requested_size))); - HwasanDeallocate(stack, tagged_ptr_old); - } - return tagged_ptr_new; -} - -static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) { - if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { - if (AllocatorMayReturnNull()) - return nullptr; - ReportCallocOverflow(nmemb, size, stack); - } - return HwasanAllocate(stack, nmemb * size, sizeof(u64), true); -} - -HwasanChunkView FindHeapChunkByAddress(uptr address) { - void *block = allocator.GetBlockBegin(reinterpret_cast(address)); - if (!block) - return HwasanChunkView(); - Metadata *metadata = - reinterpret_cast(allocator.GetMetaData(block)); - return HwasanChunkView(reinterpret_cast(block), metadata); -} - -static uptr AllocationSize(const void *tagged_ptr) { - const void *untagged_ptr = UntagPtr(tagged_ptr); - if (!untagged_ptr) return 0; - const void *beg = allocator.GetBlockBegin(untagged_ptr); - Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr); - if (b->right_aligned) { - if (beg != 
reinterpret_cast(RoundDownTo( - reinterpret_cast(untagged_ptr), kShadowAlignment))) - return 0; - } else { - if (beg != untagged_ptr) return 0; - } - return b->requested_size; -} - -void *hwasan_malloc(uptr size, StackTrace *stack) { - return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false)); -} - -void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) { - return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size)); -} - -void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) { - if (!ptr) - return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false)); - -#if HWASAN_WITH_INTERCEPTORS - // A tag of 0 means that this is a system allocator allocation, so we must use - // the system allocator to realloc it. - if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0) - return REAL(realloc)(ptr, size); -#endif - - if (size == 0) { - HwasanDeallocate(stack, ptr); - return nullptr; - } - return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64))); -} - -void *hwasan_valloc(uptr size, StackTrace *stack) { - return SetErrnoOnNull( - HwasanAllocate(stack, size, GetPageSizeCached(), false)); -} - -void *hwasan_pvalloc(uptr size, StackTrace *stack) { - uptr PageSize = GetPageSizeCached(); - if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) { - errno = errno_ENOMEM; - if (AllocatorMayReturnNull()) - return nullptr; - ReportPvallocOverflow(size, stack); - } - // pvalloc(0) should allocate one page. - size = size ? 
RoundUpTo(size, PageSize) : PageSize; - return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false)); -} - -void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { - if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAlignedAllocAlignment(size, alignment, stack); - } - return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); -} - -void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) { - if (UNLIKELY(!IsPowerOfTwo(alignment))) { - errno = errno_EINVAL; - if (AllocatorMayReturnNull()) - return nullptr; - ReportInvalidAllocationAlignment(alignment, stack); - } - return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); -} - -int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, - StackTrace *stack) { - if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { - if (AllocatorMayReturnNull()) - return errno_EINVAL; - ReportInvalidPosixMemalignAlignment(alignment, stack); - } - void *ptr = HwasanAllocate(stack, size, alignment, false); - if (UNLIKELY(!ptr)) - // OOM error is already taken care of by HwasanAllocate. - return errno_ENOMEM; - CHECK(IsAligned((uptr)ptr, alignment)); - *memptr = ptr; - return 0; -} - -void hwasan_free(void *ptr, StackTrace *stack) { -#if HWASAN_WITH_INTERCEPTORS - // A tag of 0 means that this is a system allocator allocation, so we must use - // the system allocator to free it. 
- if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0) - return REAL(free)(ptr); -#endif - - return HwasanDeallocate(stack, ptr); -} - -} // namespace __hwasan - -using namespace __hwasan; - -void __hwasan_enable_allocator_tagging() { - atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1); -} - -void __hwasan_disable_allocator_tagging() { -#if HWASAN_WITH_INTERCEPTORS - // Allocator tagging must be enabled for the system allocator fallback to work - // correctly. This means that we can't disable it at runtime if it was enabled - // at startup since that might result in our deallocations going to the system - // allocator. If tagging was disabled at startup we avoid this problem by - // disabling the fallback altogether. - CHECK(flags()->disable_allocator_tagging); -#endif - - atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0); -} - -uptr __sanitizer_get_current_allocated_bytes() { - uptr stats[AllocatorStatCount]; - allocator.GetStats(stats); - return stats[AllocatorStatAllocated]; -} - -uptr __sanitizer_get_heap_size() { - uptr stats[AllocatorStatCount]; - allocator.GetStats(stats); - return stats[AllocatorStatMapped]; -} - -uptr __sanitizer_get_free_bytes() { return 1; } - -uptr __sanitizer_get_unmapped_bytes() { return 1; } - -uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } - -int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; } - -uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); } Index: compiler-rt/trunk/lib/hwasan/hwasan_allocator.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_allocator.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_allocator.cpp @@ -0,0 +1,450 @@ +//===-- hwasan_allocator.cpp ------------------------ ---------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
+// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// HWAddressSanitizer allocator. +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "hwasan.h" +#include "hwasan_allocator.h" +#include "hwasan_mapping.h" +#include "hwasan_malloc_bisect.h" +#include "hwasan_thread.h" +#include "hwasan_report.h" + +#if HWASAN_WITH_INTERCEPTORS +DEFINE_REAL(void *, realloc, void *ptr, uptr size) +DEFINE_REAL(void, free, void *ptr) +#endif + +namespace __hwasan { + +static Allocator allocator; +static AllocatorCache fallback_allocator_cache; +static SpinMutex fallback_mutex; +static atomic_uint8_t hwasan_allocator_tagging_enabled; + +static const tag_t kFallbackAllocTag = 0xBB; +static const tag_t kFallbackFreeTag = 0xBC; + +enum RightAlignMode { + kRightAlignNever, + kRightAlignSometimes, + kRightAlignAlways +}; + +// These two variables are initialized from flags()->malloc_align_right +// in HwasanAllocatorInit and are never changed afterwards. +static RightAlignMode right_align_mode = kRightAlignNever; +static bool right_align_8 = false; + +// Initialized in HwasanAllocatorInit, an never changed. +static ALIGNED(16) u8 tail_magic[kShadowAlignment]; + +bool HwasanChunkView::IsAllocated() const { + return metadata_ && metadata_->alloc_context_id && metadata_->requested_size; +} + +// Aligns the 'addr' right to the granule boundary. +static uptr AlignRight(uptr addr, uptr requested_size) { + uptr tail_size = requested_size % kShadowAlignment; + if (!tail_size) return addr; + if (right_align_8) + return tail_size > 8 ? 
addr : addr + 8; + return addr + kShadowAlignment - tail_size; +} + +uptr HwasanChunkView::Beg() const { + if (metadata_ && metadata_->right_aligned) + return AlignRight(block_, metadata_->requested_size); + return block_; +} +uptr HwasanChunkView::End() const { + return Beg() + UsedSize(); +} +uptr HwasanChunkView::UsedSize() const { + return metadata_->requested_size; +} +u32 HwasanChunkView::GetAllocStackId() const { + return metadata_->alloc_context_id; +} + +uptr HwasanChunkView::ActualSize() const { + return allocator.GetActuallyAllocatedSize(reinterpret_cast(block_)); +} + +bool HwasanChunkView::FromSmallHeap() const { + return allocator.FromPrimary(reinterpret_cast(block_)); +} + +void GetAllocatorStats(AllocatorStatCounters s) { + allocator.GetStats(s); +} + +void HwasanAllocatorInit() { + atomic_store_relaxed(&hwasan_allocator_tagging_enabled, + !flags()->disable_allocator_tagging); + SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null); + allocator.Init(common_flags()->allocator_release_to_os_interval_ms); + switch (flags()->malloc_align_right) { + case 0: break; + case 1: + right_align_mode = kRightAlignSometimes; + right_align_8 = false; + break; + case 2: + right_align_mode = kRightAlignAlways; + right_align_8 = false; + break; + case 8: + right_align_mode = kRightAlignSometimes; + right_align_8 = true; + break; + case 9: + right_align_mode = kRightAlignAlways; + right_align_8 = true; + break; + default: + Report("ERROR: unsupported value of malloc_align_right flag: %d\n", + flags()->malloc_align_right); + Die(); + } + for (uptr i = 0; i < kShadowAlignment; i++) + tail_magic[i] = GetCurrentThread()->GenerateRandomTag(); +} + +void AllocatorSwallowThreadLocalCache(AllocatorCache *cache) { + allocator.SwallowCache(cache); +} + +static uptr TaggedSize(uptr size) { + if (!size) size = 1; + uptr new_size = RoundUpTo(size, kShadowAlignment); + CHECK_GE(new_size, size); + return new_size; +} + +static void *HwasanAllocate(StackTrace *stack, 
uptr orig_size, uptr alignment, + bool zeroise) { + if (orig_size > kMaxAllowedMallocSize) { + if (AllocatorMayReturnNull()) { + Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n", + orig_size); + return nullptr; + } + ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack); + } + + alignment = Max(alignment, kShadowAlignment); + uptr size = TaggedSize(orig_size); + Thread *t = GetCurrentThread(); + void *allocated; + if (t) { + allocated = allocator.Allocate(t->allocator_cache(), size, alignment); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocated = allocator.Allocate(cache, size, alignment); + } + if (UNLIKELY(!allocated)) { + SetAllocatorOutOfMemory(); + if (AllocatorMayReturnNull()) + return nullptr; + ReportOutOfMemory(size, stack); + } + Metadata *meta = + reinterpret_cast(allocator.GetMetaData(allocated)); + meta->requested_size = static_cast(orig_size); + meta->alloc_context_id = StackDepotPut(*stack); + meta->right_aligned = false; + if (zeroise) { + internal_memset(allocated, 0, size); + } else if (flags()->max_malloc_fill_size > 0) { + uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size); + internal_memset(allocated, flags()->malloc_fill_byte, fill_size); + } + if (!right_align_mode) + internal_memcpy(reinterpret_cast(allocated) + orig_size, tail_magic, + size - orig_size); + + void *user_ptr = allocated; + // Tagging can only be skipped when both tag_in_malloc and tag_in_free are + // false. When tag_in_malloc = false and tag_in_free = true malloc needs to + // retag to 0. + if ((flags()->tag_in_malloc || flags()->tag_in_free) && + atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) { + tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size) + ? (t ? 
t->GenerateRandomTag() : kFallbackAllocTag) + : 0; + user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag); + } + + if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) && + right_align_mode) { + uptr as_uptr = reinterpret_cast(user_ptr); + if (right_align_mode == kRightAlignAlways || + GetTagFromPointer(as_uptr) & 1) { // use a tag bit as a random bit. + user_ptr = reinterpret_cast(AlignRight(as_uptr, orig_size)); + meta->right_aligned = 1; + } + } + + HWASAN_MALLOC_HOOK(user_ptr, size); + return user_ptr; +} + +static bool PointerAndMemoryTagsMatch(void *tagged_ptr) { + CHECK(tagged_ptr); + tag_t ptr_tag = GetTagFromPointer(reinterpret_cast(tagged_ptr)); + tag_t mem_tag = *reinterpret_cast( + MemToShadow(reinterpret_cast(UntagPtr(tagged_ptr)))); + return ptr_tag == mem_tag; +} + +static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) { + CHECK(tagged_ptr); + HWASAN_FREE_HOOK(tagged_ptr); + + if (!PointerAndMemoryTagsMatch(tagged_ptr)) + ReportInvalidFree(stack, reinterpret_cast(tagged_ptr)); + + void *untagged_ptr = UntagPtr(tagged_ptr); + void *aligned_ptr = reinterpret_cast( + RoundDownTo(reinterpret_cast(untagged_ptr), kShadowAlignment)); + Metadata *meta = + reinterpret_cast(allocator.GetMetaData(aligned_ptr)); + uptr orig_size = meta->requested_size; + u32 free_context_id = StackDepotPut(*stack); + u32 alloc_context_id = meta->alloc_context_id; + + // Check tail magic. 
+ uptr tagged_size = TaggedSize(orig_size); + if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) { + uptr tail_size = tagged_size - orig_size; + CHECK_LT(tail_size, kShadowAlignment); + void *tail_beg = reinterpret_cast( + reinterpret_cast(aligned_ptr) + orig_size); + if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size)) + ReportTailOverwritten(stack, reinterpret_cast(tagged_ptr), + orig_size, tail_size, tail_magic); + } + + meta->requested_size = 0; + meta->alloc_context_id = 0; + // This memory will not be reused by anyone else, so we are free to keep it + // poisoned. + Thread *t = GetCurrentThread(); + if (flags()->max_free_fill_size > 0) { + uptr fill_size = + Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size); + internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size); + } + if (flags()->tag_in_free && malloc_bisect(stack, 0) && + atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) + TagMemoryAligned(reinterpret_cast(aligned_ptr), TaggedSize(orig_size), + t ? 
t->GenerateRandomTag() : kFallbackFreeTag); + if (t) { + allocator.Deallocate(t->allocator_cache(), aligned_ptr); + if (auto *ha = t->heap_allocations()) + ha->push({reinterpret_cast(tagged_ptr), alloc_context_id, + free_context_id, static_cast(orig_size)}); + } else { + SpinMutexLock l(&fallback_mutex); + AllocatorCache *cache = &fallback_allocator_cache; + allocator.Deallocate(cache, aligned_ptr); + } +} + +static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old, + uptr new_size, uptr alignment) { + if (!PointerAndMemoryTagsMatch(tagged_ptr_old)) + ReportInvalidFree(stack, reinterpret_cast(tagged_ptr_old)); + + void *tagged_ptr_new = + HwasanAllocate(stack, new_size, alignment, false /*zeroise*/); + if (tagged_ptr_old && tagged_ptr_new) { + void *untagged_ptr_old = UntagPtr(tagged_ptr_old); + Metadata *meta = + reinterpret_cast(allocator.GetMetaData(untagged_ptr_old)); + internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old, + Min(new_size, static_cast(meta->requested_size))); + HwasanDeallocate(stack, tagged_ptr_old); + } + return tagged_ptr_new; +} + +static void *HwasanCalloc(StackTrace *stack, uptr nmemb, uptr size) { + if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) { + if (AllocatorMayReturnNull()) + return nullptr; + ReportCallocOverflow(nmemb, size, stack); + } + return HwasanAllocate(stack, nmemb * size, sizeof(u64), true); +} + +HwasanChunkView FindHeapChunkByAddress(uptr address) { + void *block = allocator.GetBlockBegin(reinterpret_cast(address)); + if (!block) + return HwasanChunkView(); + Metadata *metadata = + reinterpret_cast(allocator.GetMetaData(block)); + return HwasanChunkView(reinterpret_cast(block), metadata); +} + +static uptr AllocationSize(const void *tagged_ptr) { + const void *untagged_ptr = UntagPtr(tagged_ptr); + if (!untagged_ptr) return 0; + const void *beg = allocator.GetBlockBegin(untagged_ptr); + Metadata *b = (Metadata *)allocator.GetMetaData(untagged_ptr); + if (b->right_aligned) { + if (beg != 
reinterpret_cast<void *>(RoundDownTo(
+                 reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment)))
+      return 0;
+  } else {
+    if (beg != untagged_ptr) return 0;
+  }
+  return b->requested_size;
+}
+
+void *hwasan_malloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+}
+
+void *hwasan_calloc(uptr nmemb, uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(HwasanCalloc(stack, nmemb, size));
+}
+
+void *hwasan_realloc(void *ptr, uptr size, StackTrace *stack) {
+  if (!ptr)
+    return SetErrnoOnNull(HwasanAllocate(stack, size, sizeof(u64), false));
+
+#if HWASAN_WITH_INTERCEPTORS
+  // A tag of 0 means that this is a system allocator allocation, so we must use
+  // the system allocator to realloc it.
+  if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0)
+    return REAL(realloc)(ptr, size);
+#endif
+
+  if (size == 0) {
+    HwasanDeallocate(stack, ptr);
+    return nullptr;
+  }
+  return SetErrnoOnNull(HwasanReallocate(stack, ptr, size, sizeof(u64)));
+}
+
+void *hwasan_valloc(uptr size, StackTrace *stack) {
+  return SetErrnoOnNull(
+      HwasanAllocate(stack, size, GetPageSizeCached(), false));
+}
+
+void *hwasan_pvalloc(uptr size, StackTrace *stack) {
+  uptr PageSize = GetPageSizeCached();
+  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
+    errno = errno_ENOMEM;
+    if (AllocatorMayReturnNull())
+      return nullptr;
+    ReportPvallocOverflow(size, stack);
+  }
+  // pvalloc(0) should allocate one page.
+  size = size ?
RoundUpTo(size, PageSize) : PageSize; + return SetErrnoOnNull(HwasanAllocate(stack, size, PageSize, false)); +} + +void *hwasan_aligned_alloc(uptr alignment, uptr size, StackTrace *stack) { + if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAlignedAllocAlignment(size, alignment, stack); + } + return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); +} + +void *hwasan_memalign(uptr alignment, uptr size, StackTrace *stack) { + if (UNLIKELY(!IsPowerOfTwo(alignment))) { + errno = errno_EINVAL; + if (AllocatorMayReturnNull()) + return nullptr; + ReportInvalidAllocationAlignment(alignment, stack); + } + return SetErrnoOnNull(HwasanAllocate(stack, size, alignment, false)); +} + +int hwasan_posix_memalign(void **memptr, uptr alignment, uptr size, + StackTrace *stack) { + if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) { + if (AllocatorMayReturnNull()) + return errno_EINVAL; + ReportInvalidPosixMemalignAlignment(alignment, stack); + } + void *ptr = HwasanAllocate(stack, size, alignment, false); + if (UNLIKELY(!ptr)) + // OOM error is already taken care of by HwasanAllocate. + return errno_ENOMEM; + CHECK(IsAligned((uptr)ptr, alignment)); + *memptr = ptr; + return 0; +} + +void hwasan_free(void *ptr, StackTrace *stack) { +#if HWASAN_WITH_INTERCEPTORS + // A tag of 0 means that this is a system allocator allocation, so we must use + // the system allocator to free it. 
+ if (!flags()->disable_allocator_tagging && GetTagFromPointer((uptr)ptr) == 0) + return REAL(free)(ptr); +#endif + + return HwasanDeallocate(stack, ptr); +} + +} // namespace __hwasan + +using namespace __hwasan; + +void __hwasan_enable_allocator_tagging() { + atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 1); +} + +void __hwasan_disable_allocator_tagging() { +#if HWASAN_WITH_INTERCEPTORS + // Allocator tagging must be enabled for the system allocator fallback to work + // correctly. This means that we can't disable it at runtime if it was enabled + // at startup since that might result in our deallocations going to the system + // allocator. If tagging was disabled at startup we avoid this problem by + // disabling the fallback altogether. + CHECK(flags()->disable_allocator_tagging); +#endif + + atomic_store_relaxed(&hwasan_allocator_tagging_enabled, 0); +} + +uptr __sanitizer_get_current_allocated_bytes() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatAllocated]; +} + +uptr __sanitizer_get_heap_size() { + uptr stats[AllocatorStatCount]; + allocator.GetStats(stats); + return stats[AllocatorStatMapped]; +} + +uptr __sanitizer_get_free_bytes() { return 1; } + +uptr __sanitizer_get_unmapped_bytes() { return 1; } + +uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; } + +int __sanitizer_get_ownership(const void *p) { return AllocationSize(p) != 0; } + +uptr __sanitizer_get_allocated_size(const void *p) { return AllocationSize(p); } Index: compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cc @@ -1,164 +0,0 @@ -//===-- hwasan_dynamic_shadow.cc --------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 
-// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory -/// region and handles ifunc resolver case, when necessary. -/// -//===----------------------------------------------------------------------===// - -#include "hwasan.h" -#include "hwasan_dynamic_shadow.h" -#include "hwasan_mapping.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_posix.h" - -#include -#include - -// The code in this file needs to run in an unrelocated binary. It should not -// access any external symbol, including its own non-hidden globals. - -namespace __hwasan { - -static void UnmapFromTo(uptr from, uptr to) { - if (to == from) - return; - CHECK(to >= from); - uptr res = internal_munmap(reinterpret_cast(from), to - from); - if (UNLIKELY(internal_iserror(res))) { - Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n", - SanitizerToolName, to - from, to - from, from); - CHECK("unable to unmap" && 0); - } -} - -// Returns an address aligned to kShadowBaseAlignment, such that -// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right -// of it are mapped no access. 
-static uptr MapDynamicShadow(uptr shadow_size_bytes) { - const uptr granularity = GetMmapGranularity(); - const uptr min_alignment = granularity << kShadowScale; - const uptr alignment = 1ULL << kShadowBaseAlignment; - CHECK_GE(alignment, min_alignment); - - const uptr left_padding = 1ULL << kShadowBaseAlignment; - const uptr shadow_size = - RoundUpTo(shadow_size_bytes, granularity); - const uptr map_size = shadow_size + left_padding + alignment; - - const uptr map_start = (uptr)MmapNoAccess(map_size); - CHECK_NE(map_start, ~(uptr)0); - - const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); - - UnmapFromTo(map_start, shadow_start - left_padding); - UnmapFromTo(shadow_start + shadow_size, map_start + map_size); - - return shadow_start; -} - -} // namespace __hwasan - -#if SANITIZER_ANDROID -extern "C" { - -INTERFACE_ATTRIBUTE void __hwasan_shadow(); -decltype(__hwasan_shadow)* __hwasan_premap_shadow(); - -} // extern "C" - -namespace __hwasan { - -// Conservative upper limit. -static uptr PremapShadowSize() { - return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale, - GetMmapGranularity()); -} - -static uptr PremapShadow() { - return MapDynamicShadow(PremapShadowSize()); -} - -static bool IsPremapShadowAvailable() { - const uptr shadow = reinterpret_cast(&__hwasan_shadow); - const uptr resolver = reinterpret_cast(&__hwasan_premap_shadow); - // shadow == resolver is how Android KitKat and older handles ifunc. - // shadow == 0 just in case. - return shadow != 0 && shadow != resolver; -} - -static uptr FindPremappedShadowStart(uptr shadow_size_bytes) { - const uptr granularity = GetMmapGranularity(); - const uptr shadow_start = reinterpret_cast(&__hwasan_shadow); - const uptr premap_shadow_size = PremapShadowSize(); - const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity); - - // We may have mapped too much. Release extra memory. 
- UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); - return shadow_start; -} - -} // namespace __hwasan - -extern "C" { - -decltype(__hwasan_shadow)* __hwasan_premap_shadow() { - // The resolver might be called multiple times. Map the shadow just once. - static __sanitizer::uptr shadow = 0; - if (!shadow) - shadow = __hwasan::PremapShadow(); - return reinterpret_cast(shadow); -} - -// __hwasan_shadow is a "function" that has the same address as the first byte -// of the shadow mapping. -INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow"))) -void __hwasan_shadow(); - -extern __attribute((weak, visibility("hidden"))) ElfW(Rela) __rela_iplt_start[], - __rela_iplt_end[]; - -} // extern "C" - -namespace __hwasan { - -void InitShadowGOT() { - // Call the ifunc resolver for __hwasan_shadow and fill in its GOT entry. This - // needs to be done before other ifunc resolvers (which are handled by libc) - // because a resolver might read __hwasan_shadow. - typedef ElfW(Addr) (*ifunc_resolver_t)(void); - for (ElfW(Rela) *r = __rela_iplt_start; r != __rela_iplt_end; ++r) { - ElfW(Addr)* offset = reinterpret_cast(r->r_offset); - ElfW(Addr) resolver = r->r_addend; - if (resolver == reinterpret_cast(&__hwasan_premap_shadow)) { - *offset = reinterpret_cast(resolver)(); - break; - } - } -} - -uptr FindDynamicShadowStart(uptr shadow_size_bytes) { - if (IsPremapShadowAvailable()) - return FindPremappedShadowStart(shadow_size_bytes); - return MapDynamicShadow(shadow_size_bytes); -} - -} // namespace __hwasan -#else -namespace __hwasan { - -void InitShadowGOT() {} - -uptr FindDynamicShadowStart(uptr shadow_size_bytes) { - return MapDynamicShadow(shadow_size_bytes); -} - -} // namespace __hwasan - -#endif // SANITIZER_ANDROID Index: compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cpp +++ 
compiler-rt/trunk/lib/hwasan/hwasan_dynamic_shadow.cpp
@@ -0,0 +1,164 @@
+//===-- hwasan_dynamic_shadow.cpp -------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+///
+/// \file
+/// This file is a part of HWAddressSanitizer. It reserves dynamic shadow memory
+/// region and handles ifunc resolver case, when necessary.
+///
+//===----------------------------------------------------------------------===//
+
+#include "hwasan.h"
+#include "hwasan_dynamic_shadow.h"
+#include "hwasan_mapping.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_posix.h"
+
+#include <elf.h>
+#include <link.h>
+
+// The code in this file needs to run in an unrelocated binary. It should not
+// access any external symbol, including its own non-hidden globals.
+
+namespace __hwasan {
+
+static void UnmapFromTo(uptr from, uptr to) {
+  if (to == from)
+    return;
+  CHECK(to >= from);
+  uptr res = internal_munmap(reinterpret_cast<void *>(from), to - from);
+  if (UNLIKELY(internal_iserror(res))) {
+    Report("ERROR: %s failed to unmap 0x%zx (%zd) bytes at address %p\n",
+           SanitizerToolName, to - from, to - from, from);
+    CHECK("unable to unmap" && 0);
+  }
+}
+
+// Returns an address aligned to kShadowBaseAlignment, such that
+// 2**kShadowBaseAlingment on the left and shadow_size_bytes bytes on the right
+// of it are mapped no access.
+static uptr MapDynamicShadow(uptr shadow_size_bytes) { + const uptr granularity = GetMmapGranularity(); + const uptr min_alignment = granularity << kShadowScale; + const uptr alignment = 1ULL << kShadowBaseAlignment; + CHECK_GE(alignment, min_alignment); + + const uptr left_padding = 1ULL << kShadowBaseAlignment; + const uptr shadow_size = + RoundUpTo(shadow_size_bytes, granularity); + const uptr map_size = shadow_size + left_padding + alignment; + + const uptr map_start = (uptr)MmapNoAccess(map_size); + CHECK_NE(map_start, ~(uptr)0); + + const uptr shadow_start = RoundUpTo(map_start + left_padding, alignment); + + UnmapFromTo(map_start, shadow_start - left_padding); + UnmapFromTo(shadow_start + shadow_size, map_start + map_size); + + return shadow_start; +} + +} // namespace __hwasan + +#if SANITIZER_ANDROID +extern "C" { + +INTERFACE_ATTRIBUTE void __hwasan_shadow(); +decltype(__hwasan_shadow)* __hwasan_premap_shadow(); + +} // extern "C" + +namespace __hwasan { + +// Conservative upper limit. +static uptr PremapShadowSize() { + return RoundUpTo(GetMaxVirtualAddress() >> kShadowScale, + GetMmapGranularity()); +} + +static uptr PremapShadow() { + return MapDynamicShadow(PremapShadowSize()); +} + +static bool IsPremapShadowAvailable() { + const uptr shadow = reinterpret_cast(&__hwasan_shadow); + const uptr resolver = reinterpret_cast(&__hwasan_premap_shadow); + // shadow == resolver is how Android KitKat and older handles ifunc. + // shadow == 0 just in case. + return shadow != 0 && shadow != resolver; +} + +static uptr FindPremappedShadowStart(uptr shadow_size_bytes) { + const uptr granularity = GetMmapGranularity(); + const uptr shadow_start = reinterpret_cast(&__hwasan_shadow); + const uptr premap_shadow_size = PremapShadowSize(); + const uptr shadow_size = RoundUpTo(shadow_size_bytes, granularity); + + // We may have mapped too much. Release extra memory. 
+ UnmapFromTo(shadow_start + shadow_size, shadow_start + premap_shadow_size); + return shadow_start; +} + +} // namespace __hwasan + +extern "C" { + +decltype(__hwasan_shadow)* __hwasan_premap_shadow() { + // The resolver might be called multiple times. Map the shadow just once. + static __sanitizer::uptr shadow = 0; + if (!shadow) + shadow = __hwasan::PremapShadow(); + return reinterpret_cast(shadow); +} + +// __hwasan_shadow is a "function" that has the same address as the first byte +// of the shadow mapping. +INTERFACE_ATTRIBUTE __attribute__((ifunc("__hwasan_premap_shadow"))) +void __hwasan_shadow(); + +extern __attribute((weak, visibility("hidden"))) ElfW(Rela) __rela_iplt_start[], + __rela_iplt_end[]; + +} // extern "C" + +namespace __hwasan { + +void InitShadowGOT() { + // Call the ifunc resolver for __hwasan_shadow and fill in its GOT entry. This + // needs to be done before other ifunc resolvers (which are handled by libc) + // because a resolver might read __hwasan_shadow. + typedef ElfW(Addr) (*ifunc_resolver_t)(void); + for (ElfW(Rela) *r = __rela_iplt_start; r != __rela_iplt_end; ++r) { + ElfW(Addr)* offset = reinterpret_cast(r->r_offset); + ElfW(Addr) resolver = r->r_addend; + if (resolver == reinterpret_cast(&__hwasan_premap_shadow)) { + *offset = reinterpret_cast(resolver)(); + break; + } + } +} + +uptr FindDynamicShadowStart(uptr shadow_size_bytes) { + if (IsPremapShadowAvailable()) + return FindPremappedShadowStart(shadow_size_bytes); + return MapDynamicShadow(shadow_size_bytes); +} + +} // namespace __hwasan +#else +namespace __hwasan { + +void InitShadowGOT() {} + +uptr FindDynamicShadowStart(uptr shadow_size_bytes) { + return MapDynamicShadow(shadow_size_bytes); +} + +} // namespace __hwasan + +#endif // SANITIZER_ANDROID Index: compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cc +++ 
compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cc @@ -1,278 +0,0 @@ -//===-- hwasan_interceptors.cc --------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// Interceptors for standard library functions. -// -// FIXME: move as many interceptors as possible into -// sanitizer_common/sanitizer_common_interceptors.h -//===----------------------------------------------------------------------===// - -#include "interception/interception.h" -#include "hwasan.h" -#include "hwasan_allocator.h" -#include "hwasan_mapping.h" -#include "hwasan_thread.h" -#include "hwasan_poisoning.h" -#include "hwasan_report.h" -#include "sanitizer_common/sanitizer_platform_limits_posix.h" -#include "sanitizer_common/sanitizer_allocator.h" -#include "sanitizer_common/sanitizer_allocator_interface.h" -#include "sanitizer_common/sanitizer_allocator_internal.h" -#include "sanitizer_common/sanitizer_atomic.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_errno.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_libc.h" -#include "sanitizer_common/sanitizer_linux.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" - -#include -// ACHTUNG! No other system header includes in this file. -// Ideally, we should get rid of stdarg.h as well. 
- -using namespace __hwasan; - -using __sanitizer::memory_order; -using __sanitizer::atomic_load; -using __sanitizer::atomic_store; -using __sanitizer::atomic_uintptr_t; - -bool IsInInterceptorScope() { - Thread *t = GetCurrentThread(); - return t && t->InInterceptorScope(); -} - -struct InterceptorScope { - InterceptorScope() { - Thread *t = GetCurrentThread(); - if (t) - t->EnterInterceptorScope(); - } - ~InterceptorScope() { - Thread *t = GetCurrentThread(); - if (t) - t->LeaveInterceptorScope(); - } -}; - -static uptr allocated_for_dlsym; -static const uptr kDlsymAllocPoolSize = 1024; -static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; - -static bool IsInDlsymAllocPool(const void *ptr) { - uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; - return off < sizeof(alloc_memory_for_dlsym); -} - -static void *AllocateFromLocalPool(uptr size_in_bytes) { - uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; - void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym]; - allocated_for_dlsym += size_in_words; - CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); - return mem; -} - -#define ENSURE_HWASAN_INITED() do { \ - CHECK(!hwasan_init_is_running); \ - if (!hwasan_inited) { \ - __hwasan_init(); \ - } \ -} while (0) - - -int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) { - GET_MALLOC_STACK_TRACE; - CHECK_NE(memptr, 0); - int res = hwasan_posix_memalign(memptr, alignment, size, &stack); - return res; -} - -void * __sanitizer_memalign(uptr alignment, uptr size) { - GET_MALLOC_STACK_TRACE; - return hwasan_memalign(alignment, size, &stack); -} - -void * __sanitizer_aligned_alloc(uptr alignment, uptr size) { - GET_MALLOC_STACK_TRACE; - return hwasan_aligned_alloc(alignment, size, &stack); -} - -void * __sanitizer___libc_memalign(uptr alignment, uptr size) { - GET_MALLOC_STACK_TRACE; - void *ptr = hwasan_memalign(alignment, size, &stack); - if (ptr) - DTLS_on_libc_memalign(ptr, size); - return ptr; -} - -void * 
__sanitizer_valloc(uptr size) { - GET_MALLOC_STACK_TRACE; - return hwasan_valloc(size, &stack); -} - -void * __sanitizer_pvalloc(uptr size) { - GET_MALLOC_STACK_TRACE; - return hwasan_pvalloc(size, &stack); -} - -void __sanitizer_free(void *ptr) { - GET_MALLOC_STACK_TRACE; - if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; - hwasan_free(ptr, &stack); -} - -void __sanitizer_cfree(void *ptr) { - GET_MALLOC_STACK_TRACE; - if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; - hwasan_free(ptr, &stack); -} - -uptr __sanitizer_malloc_usable_size(const void *ptr) { - return __sanitizer_get_allocated_size(ptr); -} - -struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() { - __sanitizer_struct_mallinfo sret; - internal_memset(&sret, 0, sizeof(sret)); - return sret; -} - -int __sanitizer_mallopt(int cmd, int value) { - return 0; -} - -void __sanitizer_malloc_stats(void) { - // FIXME: implement, but don't call REAL(malloc_stats)! -} - -void * __sanitizer_calloc(uptr nmemb, uptr size) { - GET_MALLOC_STACK_TRACE; - if (UNLIKELY(!hwasan_inited)) - // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
- return AllocateFromLocalPool(nmemb * size); - return hwasan_calloc(nmemb, size, &stack); -} - -void * __sanitizer_realloc(void *ptr, uptr size) { - GET_MALLOC_STACK_TRACE; - if (UNLIKELY(IsInDlsymAllocPool(ptr))) { - uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; - uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); - void *new_ptr; - if (UNLIKELY(!hwasan_inited)) { - new_ptr = AllocateFromLocalPool(copy_size); - } else { - copy_size = size; - new_ptr = hwasan_malloc(copy_size, &stack); - } - internal_memcpy(new_ptr, ptr, copy_size); - return new_ptr; - } - return hwasan_realloc(ptr, size, &stack); -} - -void * __sanitizer_malloc(uptr size) { - GET_MALLOC_STACK_TRACE; - if (UNLIKELY(!hwasan_init_is_running)) - ENSURE_HWASAN_INITED(); - if (UNLIKELY(!hwasan_inited)) - // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. - return AllocateFromLocalPool(size); - return hwasan_malloc(size, &stack); -} - -#if HWASAN_WITH_INTERCEPTORS -#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) 
\ - extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \ - ALIAS("__sanitizer_" #FN); \ - extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \ - ARGS) ALIAS("__sanitizer_" #FN) - -INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment, - SIZE_T size); -INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size); -INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size); -INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size); -INTERCEPTOR_ALIAS(void, free, void *ptr); -INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr); -INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size); -INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size); -INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size); - -#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD -INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size); -INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size); -INTERCEPTOR_ALIAS(void, cfree, void *ptr); -INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo); -INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value); -INTERCEPTOR_ALIAS(void, malloc_stats, void); -#endif -#endif // HWASAN_WITH_INTERCEPTORS - - -#if HWASAN_WITH_INTERCEPTORS && !defined(__aarch64__) -INTERCEPTOR(int, pthread_create, void *th, void *attr, - void *(*callback)(void *), void *param) { - ScopedTaggingDisabler disabler; - int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr), - callback, param); - return res; -} -#endif - -static void BeforeFork() { - StackDepotLockAll(); -} - -static void AfterFork() { - StackDepotUnlockAll(); -} - -INTERCEPTOR(int, fork, void) { - ENSURE_HWASAN_INITED(); - BeforeFork(); - int pid = REAL(fork)(); - AfterFork(); - return pid; -} - - -struct HwasanInterceptorContext { - bool in_interceptor_scope; -}; - -namespace __hwasan { - -int OnExit() { - // FIXME: ask frontend whether we need to return failure. 
- return 0; -} - -} // namespace __hwasan - -namespace __hwasan { - -void InitializeInterceptors() { - static int inited = 0; - CHECK_EQ(inited, 0); - - INTERCEPT_FUNCTION(fork); - -#if HWASAN_WITH_INTERCEPTORS -#if !defined(__aarch64__) - INTERCEPT_FUNCTION(pthread_create); -#endif - INTERCEPT_FUNCTION(realloc); - INTERCEPT_FUNCTION(free); -#endif - - inited = 1; -} -} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_interceptors.cpp @@ -0,0 +1,278 @@ +//===-- hwasan_interceptors.cpp -------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// Interceptors for standard library functions. 
+// +// FIXME: move as many interceptors as possible into +// sanitizer_common/sanitizer_common_interceptors.h +//===----------------------------------------------------------------------===// + +#include "interception/interception.h" +#include "hwasan.h" +#include "hwasan_allocator.h" +#include "hwasan_mapping.h" +#include "hwasan_thread.h" +#include "hwasan_poisoning.h" +#include "hwasan_report.h" +#include "sanitizer_common/sanitizer_platform_limits_posix.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_interface.h" +#include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_atomic.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_errno.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_libc.h" +#include "sanitizer_common/sanitizer_linux.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" + +#include +// ACHTUNG! No other system header includes in this file. +// Ideally, we should get rid of stdarg.h as well. 
+ +using namespace __hwasan; + +using __sanitizer::memory_order; +using __sanitizer::atomic_load; +using __sanitizer::atomic_store; +using __sanitizer::atomic_uintptr_t; + +bool IsInInterceptorScope() { + Thread *t = GetCurrentThread(); + return t && t->InInterceptorScope(); +} + +struct InterceptorScope { + InterceptorScope() { + Thread *t = GetCurrentThread(); + if (t) + t->EnterInterceptorScope(); + } + ~InterceptorScope() { + Thread *t = GetCurrentThread(); + if (t) + t->LeaveInterceptorScope(); + } +}; + +static uptr allocated_for_dlsym; +static const uptr kDlsymAllocPoolSize = 1024; +static uptr alloc_memory_for_dlsym[kDlsymAllocPoolSize]; + +static bool IsInDlsymAllocPool(const void *ptr) { + uptr off = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + return off < sizeof(alloc_memory_for_dlsym); +} + +static void *AllocateFromLocalPool(uptr size_in_bytes) { + uptr size_in_words = RoundUpTo(size_in_bytes, kWordSize) / kWordSize; + void *mem = (void *)&alloc_memory_for_dlsym[allocated_for_dlsym]; + allocated_for_dlsym += size_in_words; + CHECK_LT(allocated_for_dlsym, kDlsymAllocPoolSize); + return mem; +} + +#define ENSURE_HWASAN_INITED() do { \ + CHECK(!hwasan_init_is_running); \ + if (!hwasan_inited) { \ + __hwasan_init(); \ + } \ +} while (0) + + +int __sanitizer_posix_memalign(void **memptr, uptr alignment, uptr size) { + GET_MALLOC_STACK_TRACE; + CHECK_NE(memptr, 0); + int res = hwasan_posix_memalign(memptr, alignment, size, &stack); + return res; +} + +void * __sanitizer_memalign(uptr alignment, uptr size) { + GET_MALLOC_STACK_TRACE; + return hwasan_memalign(alignment, size, &stack); +} + +void * __sanitizer_aligned_alloc(uptr alignment, uptr size) { + GET_MALLOC_STACK_TRACE; + return hwasan_aligned_alloc(alignment, size, &stack); +} + +void * __sanitizer___libc_memalign(uptr alignment, uptr size) { + GET_MALLOC_STACK_TRACE; + void *ptr = hwasan_memalign(alignment, size, &stack); + if (ptr) + DTLS_on_libc_memalign(ptr, size); + return ptr; +} + +void * 
__sanitizer_valloc(uptr size) { + GET_MALLOC_STACK_TRACE; + return hwasan_valloc(size, &stack); +} + +void * __sanitizer_pvalloc(uptr size) { + GET_MALLOC_STACK_TRACE; + return hwasan_pvalloc(size, &stack); +} + +void __sanitizer_free(void *ptr) { + GET_MALLOC_STACK_TRACE; + if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; + hwasan_free(ptr, &stack); +} + +void __sanitizer_cfree(void *ptr) { + GET_MALLOC_STACK_TRACE; + if (!ptr || UNLIKELY(IsInDlsymAllocPool(ptr))) return; + hwasan_free(ptr, &stack); +} + +uptr __sanitizer_malloc_usable_size(const void *ptr) { + return __sanitizer_get_allocated_size(ptr); +} + +struct __sanitizer_struct_mallinfo __sanitizer_mallinfo() { + __sanitizer_struct_mallinfo sret; + internal_memset(&sret, 0, sizeof(sret)); + return sret; +} + +int __sanitizer_mallopt(int cmd, int value) { + return 0; +} + +void __sanitizer_malloc_stats(void) { + // FIXME: implement, but don't call REAL(malloc_stats)! +} + +void * __sanitizer_calloc(uptr nmemb, uptr size) { + GET_MALLOC_STACK_TRACE; + if (UNLIKELY(!hwasan_inited)) + // Hack: dlsym calls calloc before REAL(calloc) is retrieved from dlsym. 
+ return AllocateFromLocalPool(nmemb * size); + return hwasan_calloc(nmemb, size, &stack); +} + +void * __sanitizer_realloc(void *ptr, uptr size) { + GET_MALLOC_STACK_TRACE; + if (UNLIKELY(IsInDlsymAllocPool(ptr))) { + uptr offset = (uptr)ptr - (uptr)alloc_memory_for_dlsym; + uptr copy_size = Min(size, kDlsymAllocPoolSize - offset); + void *new_ptr; + if (UNLIKELY(!hwasan_inited)) { + new_ptr = AllocateFromLocalPool(copy_size); + } else { + copy_size = size; + new_ptr = hwasan_malloc(copy_size, &stack); + } + internal_memcpy(new_ptr, ptr, copy_size); + return new_ptr; + } + return hwasan_realloc(ptr, size, &stack); +} + +void * __sanitizer_malloc(uptr size) { + GET_MALLOC_STACK_TRACE; + if (UNLIKELY(!hwasan_init_is_running)) + ENSURE_HWASAN_INITED(); + if (UNLIKELY(!hwasan_inited)) + // Hack: dlsym calls malloc before REAL(malloc) is retrieved from dlsym. + return AllocateFromLocalPool(size); + return hwasan_malloc(size, &stack); +} + +#if HWASAN_WITH_INTERCEPTORS +#define INTERCEPTOR_ALIAS(RET, FN, ARGS...) 
\ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE RET WRAP(FN)(ARGS) \ + ALIAS("__sanitizer_" #FN); \ + extern "C" SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE RET FN( \ + ARGS) ALIAS("__sanitizer_" #FN) + +INTERCEPTOR_ALIAS(int, posix_memalign, void **memptr, SIZE_T alignment, + SIZE_T size); +INTERCEPTOR_ALIAS(void *, aligned_alloc, SIZE_T alignment, SIZE_T size); +INTERCEPTOR_ALIAS(void *, __libc_memalign, SIZE_T alignment, SIZE_T size); +INTERCEPTOR_ALIAS(void *, valloc, SIZE_T size); +INTERCEPTOR_ALIAS(void, free, void *ptr); +INTERCEPTOR_ALIAS(uptr, malloc_usable_size, const void *ptr); +INTERCEPTOR_ALIAS(void *, calloc, SIZE_T nmemb, SIZE_T size); +INTERCEPTOR_ALIAS(void *, realloc, void *ptr, SIZE_T size); +INTERCEPTOR_ALIAS(void *, malloc, SIZE_T size); + +#if !SANITIZER_FREEBSD && !SANITIZER_NETBSD +INTERCEPTOR_ALIAS(void *, memalign, SIZE_T alignment, SIZE_T size); +INTERCEPTOR_ALIAS(void *, pvalloc, SIZE_T size); +INTERCEPTOR_ALIAS(void, cfree, void *ptr); +INTERCEPTOR_ALIAS(__sanitizer_struct_mallinfo, mallinfo); +INTERCEPTOR_ALIAS(int, mallopt, int cmd, int value); +INTERCEPTOR_ALIAS(void, malloc_stats, void); +#endif +#endif // HWASAN_WITH_INTERCEPTORS + + +#if HWASAN_WITH_INTERCEPTORS && !defined(__aarch64__) +INTERCEPTOR(int, pthread_create, void *th, void *attr, + void *(*callback)(void *), void *param) { + ScopedTaggingDisabler disabler; + int res = REAL(pthread_create)(UntagPtr(th), UntagPtr(attr), + callback, param); + return res; +} +#endif + +static void BeforeFork() { + StackDepotLockAll(); +} + +static void AfterFork() { + StackDepotUnlockAll(); +} + +INTERCEPTOR(int, fork, void) { + ENSURE_HWASAN_INITED(); + BeforeFork(); + int pid = REAL(fork)(); + AfterFork(); + return pid; +} + + +struct HwasanInterceptorContext { + bool in_interceptor_scope; +}; + +namespace __hwasan { + +int OnExit() { + // FIXME: ask frontend whether we need to return failure. 
+ return 0; +} + +} // namespace __hwasan + +namespace __hwasan { + +void InitializeInterceptors() { + static int inited = 0; + CHECK_EQ(inited, 0); + + INTERCEPT_FUNCTION(fork); + +#if HWASAN_WITH_INTERCEPTORS +#if !defined(__aarch64__) + INTERCEPT_FUNCTION(pthread_create); +#endif + INTERCEPT_FUNCTION(realloc); + INTERCEPT_FUNCTION(free); +#endif + + inited = 1; +} +} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_linux.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_linux.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_linux.cc @@ -1,436 +0,0 @@ -//===-- hwasan_linux.cc -----------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and -/// FreeBSD-specific code. 
-/// -//===----------------------------------------------------------------------===// - -#include "sanitizer_common/sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD - -#include "hwasan.h" -#include "hwasan_dynamic_shadow.h" -#include "hwasan_interface_internal.h" -#include "hwasan_mapping.h" -#include "hwasan_report.h" -#include "hwasan_thread.h" -#include "hwasan_thread_list.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_procmaps.h" - -#if HWASAN_WITH_INTERCEPTORS && !SANITIZER_ANDROID -SANITIZER_INTERFACE_ATTRIBUTE -THREADLOCAL uptr __hwasan_tls; -#endif - -namespace __hwasan { - -static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { - CHECK_EQ((beg % GetMmapGranularity()), 0); - CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); - uptr size = end - beg + 1; - DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. - if (!MmapFixedNoReserve(beg, size, name)) { - Report( - "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " - "Perhaps you're using ulimit -v\n", - size); - Abort(); - } -} - -static void ProtectGap(uptr addr, uptr size) { - if (!size) - return; - void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) - return; - // A few pages at the start of the address space can not be protected. - // But we really want to protect as much as possible, to prevent this memory - // being returned as a result of a non-FIXED mmap(). - if (addr == 0) { - uptr step = GetMmapGranularity(); - while (size > step) { - addr += step; - size -= step; - void *res = MmapFixedNoAccess(addr, size, "shadow gap"); - if (addr == (uptr)res) - return; - } - } - - Report( - "ERROR: Failed to protect shadow gap [%p, %p]. " - "HWASan cannot proceed correctly. 
ABORTING.\n", (void *)addr, - (void *)(addr + size)); - DumpProcessMap(); - Die(); -} - -static uptr kLowMemStart; -static uptr kLowMemEnd; -static uptr kLowShadowEnd; -static uptr kLowShadowStart; -static uptr kHighShadowStart; -static uptr kHighShadowEnd; -static uptr kHighMemStart; -static uptr kHighMemEnd; - -static void PrintRange(uptr start, uptr end, const char *name) { - Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); -} - -static void PrintAddressSpaceLayout() { - PrintRange(kHighMemStart, kHighMemEnd, "HighMem"); - if (kHighShadowEnd + 1 < kHighMemStart) - PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap"); - else - CHECK_EQ(kHighShadowEnd + 1, kHighMemStart); - PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow"); - if (kLowShadowEnd + 1 < kHighShadowStart) - PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap"); - else - CHECK_EQ(kLowMemEnd + 1, kHighShadowStart); - PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow"); - if (kLowMemEnd + 1 < kLowShadowStart) - PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap"); - else - CHECK_EQ(kLowMemEnd + 1, kLowShadowStart); - PrintRange(kLowMemStart, kLowMemEnd, "LowMem"); - CHECK_EQ(0, kLowMemStart); -} - -static uptr GetHighMemEnd() { - // HighMem covers the upper part of the address space. - uptr max_address = GetMaxUserVirtualAddress(); - // Adjust max address to make sure that kHighMemEnd and kHighMemStart are - // properly aligned: - max_address |= (GetMmapGranularity() << kShadowScale) - 1; - return max_address; -} - -static void InitializeShadowBaseAddress(uptr shadow_size_bytes) { - __hwasan_shadow_memory_dynamic_address = - FindDynamicShadowStart(shadow_size_bytes); -} - -bool InitShadow() { - // Define the entire memory range. - kHighMemEnd = GetHighMemEnd(); - - // Determine shadow memory base offset. - InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd)); - - // Place the low memory first. 
- kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1; - kLowMemStart = 0; - - // Define the low shadow based on the already placed low memory. - kLowShadowEnd = MemToShadow(kLowMemEnd); - kLowShadowStart = __hwasan_shadow_memory_dynamic_address; - - // High shadow takes whatever memory is left up there (making sure it is not - // interfering with low memory in the fixed case). - kHighShadowEnd = MemToShadow(kHighMemEnd); - kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1; - - // High memory starts where allocated shadow allows. - kHighMemStart = ShadowToMem(kHighShadowStart); - - // Check the sanity of the defined memory ranges (there might be gaps). - CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0); - CHECK_GT(kHighMemStart, kHighShadowEnd); - CHECK_GT(kHighShadowEnd, kHighShadowStart); - CHECK_GT(kHighShadowStart, kLowMemEnd); - CHECK_GT(kLowMemEnd, kLowMemStart); - CHECK_GT(kLowShadowEnd, kLowShadowStart); - CHECK_GT(kLowShadowStart, kLowMemEnd); - - if (Verbosity()) - PrintAddressSpaceLayout(); - - // Reserve shadow memory. - ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow"); - ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow"); - - // Protect all the gaps. 
- ProtectGap(0, Min(kLowMemStart, kLowShadowStart)); - if (kLowMemEnd + 1 < kLowShadowStart) - ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1); - if (kLowShadowEnd + 1 < kHighShadowStart) - ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1); - if (kHighShadowEnd + 1 < kHighMemStart) - ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1); - - return true; -} - -void InitThreads() { - CHECK(__hwasan_shadow_memory_dynamic_address); - uptr guard_page_size = GetMmapGranularity(); - uptr thread_space_start = - __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment); - uptr thread_space_end = - __hwasan_shadow_memory_dynamic_address - guard_page_size; - ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1, - "hwasan threads"); - ProtectGap(thread_space_end, - __hwasan_shadow_memory_dynamic_address - thread_space_end); - InitThreadList(thread_space_start, thread_space_end - thread_space_start); -} - -static void MadviseShadowRegion(uptr beg, uptr end) { - uptr size = end - beg + 1; - if (common_flags()->no_huge_pages_for_shadow) - NoHugePagesInRegion(beg, size); - if (common_flags()->use_madv_dontdump) - DontDumpShadowMemory(beg, size); -} - -void MadviseShadow() { - MadviseShadowRegion(kLowShadowStart, kLowShadowEnd); - MadviseShadowRegion(kHighShadowStart, kHighShadowEnd); -} - -bool MemIsApp(uptr p) { - CHECK(GetTagFromPointer(p) == 0); - return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); -} - -static void HwasanAtExit(void) { - if (common_flags()->print_module_map) - DumpProcessMap(); - if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0)) - ReportStats(); - if (hwasan_report_count > 0) { - // ReportAtExitStatistics(); - if (common_flags()->exitcode) - internal__exit(common_flags()->exitcode); - } -} - -void InstallAtExitHandler() { - atexit(HwasanAtExit); -} - -// ---------------------- TSD ---------------- {{{1 - -extern "C" void __hwasan_thread_enter() { - 
hwasanThreadList().CreateCurrentThread()->InitRandomState(); -} - -extern "C" void __hwasan_thread_exit() { - Thread *t = GetCurrentThread(); - // Make sure that signal handler can not see a stale current thread pointer. - atomic_signal_fence(memory_order_seq_cst); - if (t) - hwasanThreadList().ReleaseThread(t); -} - -#if HWASAN_WITH_INTERCEPTORS -static pthread_key_t tsd_key; -static bool tsd_key_inited = false; - -void HwasanTSDThreadInit() { - if (tsd_key_inited) - CHECK_EQ(0, pthread_setspecific(tsd_key, - (void *)GetPthreadDestructorIterations())); -} - -void HwasanTSDDtor(void *tsd) { - uptr iterations = (uptr)tsd; - if (iterations > 1) { - CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1))); - return; - } - __hwasan_thread_exit(); -} - -void HwasanTSDInit() { - CHECK(!tsd_key_inited); - tsd_key_inited = true; - CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor)); -} -#else -void HwasanTSDInit() {} -void HwasanTSDThreadInit() {} -#endif - -#if SANITIZER_ANDROID -uptr *GetCurrentThreadLongPtr() { - return (uptr *)get_android_tls_ptr(); -} -#else -uptr *GetCurrentThreadLongPtr() { - return &__hwasan_tls; -} -#endif - -#if SANITIZER_ANDROID -void AndroidTestTlsSlot() { - uptr kMagicValue = 0x010203040A0B0C0D; - uptr *tls_ptr = GetCurrentThreadLongPtr(); - uptr old_value = *tls_ptr; - *tls_ptr = kMagicValue; - dlerror(); - if (*(uptr *)get_android_tls_ptr() != kMagicValue) { - Printf( - "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used " - "for dlerror().\n"); - Die(); - } - *tls_ptr = old_value; -} -#else -void AndroidTestTlsSlot() {} -#endif - -Thread *GetCurrentThread() { - uptr *ThreadLong = GetCurrentThreadLongPtr(); -#if HWASAN_WITH_INTERCEPTORS - if (!*ThreadLong) - __hwasan_thread_enter(); -#endif - auto *R = (StackAllocationsRingBuffer *)ThreadLong; - return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next())); -} - -struct AccessInfo { - uptr addr; - uptr size; - bool is_store; - bool is_load; - bool 
recover; -}; - -static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) { - // Access type is passed in a platform dependent way (see below) and encoded - // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is - // recoverable. Valid values of Y are 0 to 4, which are interpreted as - // log2(access_size), and 0xF, which means that access size is passed via - // platform dependent register (see below). -#if defined(__aarch64__) - // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF, - // access size is stored in X1 register. Access address is always in X0 - // register. - uptr pc = (uptr)info->si_addr; - const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff; - if ((code & 0xff00) != 0x900) - return AccessInfo{}; // Not ours. - - const bool is_store = code & 0x10; - const bool recover = code & 0x20; - const uptr addr = uc->uc_mcontext.regs[0]; - const unsigned size_log = code & 0xf; - if (size_log > 4 && size_log != 0xf) - return AccessInfo{}; // Not ours. - const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log; - -#elif defined(__x86_64__) - // Access type is encoded in the instruction following INT3 as - // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in - // RSI register. Access address is always in RDI register. - uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP]; - uint8_t *nop = (uint8_t*)pc; - if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 || - *(nop + 3) < 0x40) - return AccessInfo{}; // Not ours. - const unsigned code = *(nop + 3); - - const bool is_store = code & 0x10; - const bool recover = code & 0x20; - const uptr addr = uc->uc_mcontext.gregs[REG_RDI]; - const unsigned size_log = code & 0xf; - if (size_log > 4 && size_log != 0xf) - return AccessInfo{}; // Not ours. - const uptr size = - size_log == 0xf ? 
uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log; - -#else -# error Unsupported architecture -#endif - - return AccessInfo{addr, size, is_store, !is_store, recover}; -} - -static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, - ucontext_t *uc) { - InternalMmapVector stack_buffer(1); - BufferedStackTrace *stack = stack_buffer.data(); - stack->Reset(); - GetStackTrace(stack, kStackTraceMax, pc, frame, uc, - common_flags()->fast_unwind_on_fatal); - - bool fatal = flags()->halt_on_error || !ai.recover; - ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal); -} - -static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) { - AccessInfo ai = GetAccessInfo(info, uc); - if (!ai.is_store && !ai.is_load) - return false; - - SignalContext sig{info, uc}; - HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc); - -#if defined(__aarch64__) - uc->uc_mcontext.pc += 4; -#elif defined(__x86_64__) -#else -# error Unsupported architecture -#endif - return true; -} - -extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_tag_mismatch( - uptr addr, uptr access_info) { - AccessInfo ai; - ai.is_store = access_info & 0x10; - ai.recover = false; - ai.addr = addr; - ai.size = 1 << (access_info & 0xf); - - HandleTagMismatch(ai, (uptr)__builtin_return_address(0), - (uptr)__builtin_frame_address(0), nullptr); - __builtin_unreachable(); -} - -static void OnStackUnwind(const SignalContext &sig, const void *, - BufferedStackTrace *stack) { - GetStackTrace(stack, kStackTraceMax, StackTrace::GetNextInstructionPc(sig.pc), - sig.bp, sig.context, common_flags()->fast_unwind_on_fatal); -} - -void HwasanOnDeadlySignal(int signo, void *info, void *context) { - // Probably a tag mismatch. 
- if (signo == SIGTRAP) - if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context)) - return; - - HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr); -} - - -} // namespace __hwasan - -#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD Index: compiler-rt/trunk/lib/hwasan/hwasan_linux.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_linux.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_linux.cpp @@ -0,0 +1,436 @@ +//===-- hwasan_linux.cpp ----------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer and contains Linux-, NetBSD- and +/// FreeBSD-specific code. 
+/// +//===----------------------------------------------------------------------===// + +#include "sanitizer_common/sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD + +#include "hwasan.h" +#include "hwasan_dynamic_shadow.h" +#include "hwasan_interface_internal.h" +#include "hwasan_mapping.h" +#include "hwasan_report.h" +#include "hwasan_thread.h" +#include "hwasan_thread_list.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_procmaps.h" + +#if HWASAN_WITH_INTERCEPTORS && !SANITIZER_ANDROID +SANITIZER_INTERFACE_ATTRIBUTE +THREADLOCAL uptr __hwasan_tls; +#endif + +namespace __hwasan { + +static void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name) { + CHECK_EQ((beg % GetMmapGranularity()), 0); + CHECK_EQ(((end + 1) % GetMmapGranularity()), 0); + uptr size = end - beg + 1; + DecreaseTotalMmap(size); // Don't count the shadow against mmap_limit_mb. + if (!MmapFixedNoReserve(beg, size, name)) { + Report( + "ReserveShadowMemoryRange failed while trying to map 0x%zx bytes. " + "Perhaps you're using ulimit -v\n", + size); + Abort(); + } +} + +static void ProtectGap(uptr addr, uptr size) { + if (!size) + return; + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + // A few pages at the start of the address space can not be protected. + // But we really want to protect as much as possible, to prevent this memory + // being returned as a result of a non-FIXED mmap(). + if (addr == 0) { + uptr step = GetMmapGranularity(); + while (size > step) { + addr += step; + size -= step; + void *res = MmapFixedNoAccess(addr, size, "shadow gap"); + if (addr == (uptr)res) + return; + } + } + + Report( + "ERROR: Failed to protect shadow gap [%p, %p]. " + "HWASan cannot proceed correctly. 
ABORTING.\n", (void *)addr, + (void *)(addr + size)); + DumpProcessMap(); + Die(); +} + +static uptr kLowMemStart; +static uptr kLowMemEnd; +static uptr kLowShadowEnd; +static uptr kLowShadowStart; +static uptr kHighShadowStart; +static uptr kHighShadowEnd; +static uptr kHighMemStart; +static uptr kHighMemEnd; + +static void PrintRange(uptr start, uptr end, const char *name) { + Printf("|| [%p, %p] || %.*s ||\n", (void *)start, (void *)end, 10, name); +} + +static void PrintAddressSpaceLayout() { + PrintRange(kHighMemStart, kHighMemEnd, "HighMem"); + if (kHighShadowEnd + 1 < kHighMemStart) + PrintRange(kHighShadowEnd + 1, kHighMemStart - 1, "ShadowGap"); + else + CHECK_EQ(kHighShadowEnd + 1, kHighMemStart); + PrintRange(kHighShadowStart, kHighShadowEnd, "HighShadow"); + if (kLowShadowEnd + 1 < kHighShadowStart) + PrintRange(kLowShadowEnd + 1, kHighShadowStart - 1, "ShadowGap"); + else + CHECK_EQ(kLowMemEnd + 1, kHighShadowStart); + PrintRange(kLowShadowStart, kLowShadowEnd, "LowShadow"); + if (kLowMemEnd + 1 < kLowShadowStart) + PrintRange(kLowMemEnd + 1, kLowShadowStart - 1, "ShadowGap"); + else + CHECK_EQ(kLowMemEnd + 1, kLowShadowStart); + PrintRange(kLowMemStart, kLowMemEnd, "LowMem"); + CHECK_EQ(0, kLowMemStart); +} + +static uptr GetHighMemEnd() { + // HighMem covers the upper part of the address space. + uptr max_address = GetMaxUserVirtualAddress(); + // Adjust max address to make sure that kHighMemEnd and kHighMemStart are + // properly aligned: + max_address |= (GetMmapGranularity() << kShadowScale) - 1; + return max_address; +} + +static void InitializeShadowBaseAddress(uptr shadow_size_bytes) { + __hwasan_shadow_memory_dynamic_address = + FindDynamicShadowStart(shadow_size_bytes); +} + +bool InitShadow() { + // Define the entire memory range. + kHighMemEnd = GetHighMemEnd(); + + // Determine shadow memory base offset. + InitializeShadowBaseAddress(MemToShadowSize(kHighMemEnd)); + + // Place the low memory first. 
+ kLowMemEnd = __hwasan_shadow_memory_dynamic_address - 1; + kLowMemStart = 0; + + // Define the low shadow based on the already placed low memory. + kLowShadowEnd = MemToShadow(kLowMemEnd); + kLowShadowStart = __hwasan_shadow_memory_dynamic_address; + + // High shadow takes whatever memory is left up there (making sure it is not + // interfering with low memory in the fixed case). + kHighShadowEnd = MemToShadow(kHighMemEnd); + kHighShadowStart = Max(kLowMemEnd, MemToShadow(kHighShadowEnd)) + 1; + + // High memory starts where allocated shadow allows. + kHighMemStart = ShadowToMem(kHighShadowStart); + + // Check the sanity of the defined memory ranges (there might be gaps). + CHECK_EQ(kHighMemStart % GetMmapGranularity(), 0); + CHECK_GT(kHighMemStart, kHighShadowEnd); + CHECK_GT(kHighShadowEnd, kHighShadowStart); + CHECK_GT(kHighShadowStart, kLowMemEnd); + CHECK_GT(kLowMemEnd, kLowMemStart); + CHECK_GT(kLowShadowEnd, kLowShadowStart); + CHECK_GT(kLowShadowStart, kLowMemEnd); + + if (Verbosity()) + PrintAddressSpaceLayout(); + + // Reserve shadow memory. + ReserveShadowMemoryRange(kLowShadowStart, kLowShadowEnd, "low shadow"); + ReserveShadowMemoryRange(kHighShadowStart, kHighShadowEnd, "high shadow"); + + // Protect all the gaps. 
+ ProtectGap(0, Min(kLowMemStart, kLowShadowStart)); + if (kLowMemEnd + 1 < kLowShadowStart) + ProtectGap(kLowMemEnd + 1, kLowShadowStart - kLowMemEnd - 1); + if (kLowShadowEnd + 1 < kHighShadowStart) + ProtectGap(kLowShadowEnd + 1, kHighShadowStart - kLowShadowEnd - 1); + if (kHighShadowEnd + 1 < kHighMemStart) + ProtectGap(kHighShadowEnd + 1, kHighMemStart - kHighShadowEnd - 1); + + return true; +} + +void InitThreads() { + CHECK(__hwasan_shadow_memory_dynamic_address); + uptr guard_page_size = GetMmapGranularity(); + uptr thread_space_start = + __hwasan_shadow_memory_dynamic_address - (1ULL << kShadowBaseAlignment); + uptr thread_space_end = + __hwasan_shadow_memory_dynamic_address - guard_page_size; + ReserveShadowMemoryRange(thread_space_start, thread_space_end - 1, + "hwasan threads"); + ProtectGap(thread_space_end, + __hwasan_shadow_memory_dynamic_address - thread_space_end); + InitThreadList(thread_space_start, thread_space_end - thread_space_start); +} + +static void MadviseShadowRegion(uptr beg, uptr end) { + uptr size = end - beg + 1; + if (common_flags()->no_huge_pages_for_shadow) + NoHugePagesInRegion(beg, size); + if (common_flags()->use_madv_dontdump) + DontDumpShadowMemory(beg, size); +} + +void MadviseShadow() { + MadviseShadowRegion(kLowShadowStart, kLowShadowEnd); + MadviseShadowRegion(kHighShadowStart, kHighShadowEnd); +} + +bool MemIsApp(uptr p) { + CHECK(GetTagFromPointer(p) == 0); + return p >= kHighMemStart || (p >= kLowMemStart && p <= kLowMemEnd); +} + +static void HwasanAtExit(void) { + if (common_flags()->print_module_map) + DumpProcessMap(); + if (flags()->print_stats && (flags()->atexit || hwasan_report_count > 0)) + ReportStats(); + if (hwasan_report_count > 0) { + // ReportAtExitStatistics(); + if (common_flags()->exitcode) + internal__exit(common_flags()->exitcode); + } +} + +void InstallAtExitHandler() { + atexit(HwasanAtExit); +} + +// ---------------------- TSD ---------------- {{{1 + +extern "C" void __hwasan_thread_enter() { + 
hwasanThreadList().CreateCurrentThread()->InitRandomState(); +} + +extern "C" void __hwasan_thread_exit() { + Thread *t = GetCurrentThread(); + // Make sure that signal handler can not see a stale current thread pointer. + atomic_signal_fence(memory_order_seq_cst); + if (t) + hwasanThreadList().ReleaseThread(t); +} + +#if HWASAN_WITH_INTERCEPTORS +static pthread_key_t tsd_key; +static bool tsd_key_inited = false; + +void HwasanTSDThreadInit() { + if (tsd_key_inited) + CHECK_EQ(0, pthread_setspecific(tsd_key, + (void *)GetPthreadDestructorIterations())); +} + +void HwasanTSDDtor(void *tsd) { + uptr iterations = (uptr)tsd; + if (iterations > 1) { + CHECK_EQ(0, pthread_setspecific(tsd_key, (void *)(iterations - 1))); + return; + } + __hwasan_thread_exit(); +} + +void HwasanTSDInit() { + CHECK(!tsd_key_inited); + tsd_key_inited = true; + CHECK_EQ(0, pthread_key_create(&tsd_key, HwasanTSDDtor)); +} +#else +void HwasanTSDInit() {} +void HwasanTSDThreadInit() {} +#endif + +#if SANITIZER_ANDROID +uptr *GetCurrentThreadLongPtr() { + return (uptr *)get_android_tls_ptr(); +} +#else +uptr *GetCurrentThreadLongPtr() { + return &__hwasan_tls; +} +#endif + +#if SANITIZER_ANDROID +void AndroidTestTlsSlot() { + uptr kMagicValue = 0x010203040A0B0C0D; + uptr *tls_ptr = GetCurrentThreadLongPtr(); + uptr old_value = *tls_ptr; + *tls_ptr = kMagicValue; + dlerror(); + if (*(uptr *)get_android_tls_ptr() != kMagicValue) { + Printf( + "ERROR: Incompatible version of Android: TLS_SLOT_SANITIZER(6) is used " + "for dlerror().\n"); + Die(); + } + *tls_ptr = old_value; +} +#else +void AndroidTestTlsSlot() {} +#endif + +Thread *GetCurrentThread() { + uptr *ThreadLong = GetCurrentThreadLongPtr(); +#if HWASAN_WITH_INTERCEPTORS + if (!*ThreadLong) + __hwasan_thread_enter(); +#endif + auto *R = (StackAllocationsRingBuffer *)ThreadLong; + return hwasanThreadList().GetThreadByBufferAddress((uptr)(R->Next())); +} + +struct AccessInfo { + uptr addr; + uptr size; + bool is_store; + bool is_load; + bool 
recover; +}; + +static AccessInfo GetAccessInfo(siginfo_t *info, ucontext_t *uc) { + // Access type is passed in a platform dependent way (see below) and encoded + // as 0xXY, where X&1 is 1 for store, 0 for load, and X&2 is 1 if the error is + // recoverable. Valid values of Y are 0 to 4, which are interpreted as + // log2(access_size), and 0xF, which means that access size is passed via + // platform dependent register (see below). +#if defined(__aarch64__) + // Access type is encoded in BRK immediate as 0x900 + 0xXY. For Y == 0xF, + // access size is stored in X1 register. Access address is always in X0 + // register. + uptr pc = (uptr)info->si_addr; + const unsigned code = ((*(u32 *)pc) >> 5) & 0xffff; + if ((code & 0xff00) != 0x900) + return AccessInfo{}; // Not ours. + + const bool is_store = code & 0x10; + const bool recover = code & 0x20; + const uptr addr = uc->uc_mcontext.regs[0]; + const unsigned size_log = code & 0xf; + if (size_log > 4 && size_log != 0xf) + return AccessInfo{}; // Not ours. + const uptr size = size_log == 0xf ? uc->uc_mcontext.regs[1] : 1U << size_log; + +#elif defined(__x86_64__) + // Access type is encoded in the instruction following INT3 as + // NOP DWORD ptr [EAX + 0x40 + 0xXY]. For Y == 0xF, access size is stored in + // RSI register. Access address is always in RDI register. + uptr pc = (uptr)uc->uc_mcontext.gregs[REG_RIP]; + uint8_t *nop = (uint8_t*)pc; + if (*nop != 0x0f || *(nop + 1) != 0x1f || *(nop + 2) != 0x40 || + *(nop + 3) < 0x40) + return AccessInfo{}; // Not ours. + const unsigned code = *(nop + 3); + + const bool is_store = code & 0x10; + const bool recover = code & 0x20; + const uptr addr = uc->uc_mcontext.gregs[REG_RDI]; + const unsigned size_log = code & 0xf; + if (size_log > 4 && size_log != 0xf) + return AccessInfo{}; // Not ours. + const uptr size = + size_log == 0xf ? 
uc->uc_mcontext.gregs[REG_RSI] : 1U << size_log; + +#else +# error Unsupported architecture +#endif + + return AccessInfo{addr, size, is_store, !is_store, recover}; +} + +static void HandleTagMismatch(AccessInfo ai, uptr pc, uptr frame, + ucontext_t *uc) { + InternalMmapVector stack_buffer(1); + BufferedStackTrace *stack = stack_buffer.data(); + stack->Reset(); + GetStackTrace(stack, kStackTraceMax, pc, frame, uc, + common_flags()->fast_unwind_on_fatal); + + bool fatal = flags()->halt_on_error || !ai.recover; + ReportTagMismatch(stack, ai.addr, ai.size, ai.is_store, fatal); +} + +static bool HwasanOnSIGTRAP(int signo, siginfo_t *info, ucontext_t *uc) { + AccessInfo ai = GetAccessInfo(info, uc); + if (!ai.is_store && !ai.is_load) + return false; + + SignalContext sig{info, uc}; + HandleTagMismatch(ai, StackTrace::GetNextInstructionPc(sig.pc), sig.bp, uc); + +#if defined(__aarch64__) + uc->uc_mcontext.pc += 4; +#elif defined(__x86_64__) +#else +# error Unsupported architecture +#endif + return true; +} + +extern "C" SANITIZER_INTERFACE_ATTRIBUTE void __hwasan_tag_mismatch( + uptr addr, uptr access_info) { + AccessInfo ai; + ai.is_store = access_info & 0x10; + ai.recover = false; + ai.addr = addr; + ai.size = 1 << (access_info & 0xf); + + HandleTagMismatch(ai, (uptr)__builtin_return_address(0), + (uptr)__builtin_frame_address(0), nullptr); + __builtin_unreachable(); +} + +static void OnStackUnwind(const SignalContext &sig, const void *, + BufferedStackTrace *stack) { + GetStackTrace(stack, kStackTraceMax, StackTrace::GetNextInstructionPc(sig.pc), + sig.bp, sig.context, common_flags()->fast_unwind_on_fatal); +} + +void HwasanOnDeadlySignal(int signo, void *info, void *context) { + // Probably a tag mismatch. 
+ if (signo == SIGTRAP) + if (HwasanOnSIGTRAP(signo, (siginfo_t *)info, (ucontext_t*)context)) + return; + + HandleDeadlySignal(info, context, GetTid(), &OnStackUnwind, nullptr); +} + + +} // namespace __hwasan + +#endif // SANITIZER_FREEBSD || SANITIZER_LINUX || SANITIZER_NETBSD Index: compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cc @@ -1,44 +0,0 @@ -//===-- hwasan_memintrinsics.cc ---------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -/// -/// \file -/// This file is a part of HWAddressSanitizer and contains HWASAN versions of -/// memset, memcpy and memmove -/// -//===----------------------------------------------------------------------===// - -#include -#include "hwasan.h" -#include "hwasan_checks.h" -#include "hwasan_flags.h" -#include "hwasan_interface_internal.h" -#include "sanitizer_common/sanitizer_libc.h" - -using namespace __hwasan; - -void *__hwasan_memset(void *block, int c, uptr size) { - CheckAddressSized( - reinterpret_cast(block), size); - return memset(UntagPtr(block), c, size); -} - -void *__hwasan_memcpy(void *to, const void *from, uptr size) { - CheckAddressSized( - reinterpret_cast(to), size); - CheckAddressSized( - reinterpret_cast(from), size); - return memcpy(UntagPtr(to), UntagPtr(from), size); -} - -void *__hwasan_memmove(void *to, const void *from, uptr size) { - CheckAddressSized( - reinterpret_cast(to), size); - CheckAddressSized( - reinterpret_cast(from), size); - return memmove(UntagPtr(to), UntagPtr(from), size); -} Index: 
compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_memintrinsics.cpp @@ -0,0 +1,44 @@ +//===-- hwasan_memintrinsics.cpp --------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +/// +/// \file +/// This file is a part of HWAddressSanitizer and contains HWASAN versions of +/// memset, memcpy and memmove +/// +//===----------------------------------------------------------------------===// + +#include +#include "hwasan.h" +#include "hwasan_checks.h" +#include "hwasan_flags.h" +#include "hwasan_interface_internal.h" +#include "sanitizer_common/sanitizer_libc.h" + +using namespace __hwasan; + +void *__hwasan_memset(void *block, int c, uptr size) { + CheckAddressSized( + reinterpret_cast(block), size); + return memset(UntagPtr(block), c, size); +} + +void *__hwasan_memcpy(void *to, const void *from, uptr size) { + CheckAddressSized( + reinterpret_cast(to), size); + CheckAddressSized( + reinterpret_cast(from), size); + return memcpy(UntagPtr(to), UntagPtr(from), size); +} + +void *__hwasan_memmove(void *to, const void *from, uptr size) { + CheckAddressSized( + reinterpret_cast(to), size); + CheckAddressSized( + reinterpret_cast(from), size); + return memmove(UntagPtr(to), UntagPtr(from), size); +} Index: compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cc @@ -1,66 +0,0 @@ -//===-- hwasan_new_delete.cc ----------------------------------------------===// 
-// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// Interceptors for operators new and delete. -//===----------------------------------------------------------------------===// - -#include "hwasan.h" -#include "interception/interception.h" -#include "sanitizer_common/sanitizer_allocator.h" -#include "sanitizer_common/sanitizer_allocator_report.h" - -#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE - -#include - -using namespace __hwasan; // NOLINT - -// Fake std::nothrow_t to avoid including . -namespace std { - struct nothrow_t {}; -} // namespace std - - -// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. -#define OPERATOR_NEW_BODY(nothrow) \ - GET_MALLOC_STACK_TRACE; \ - void *res = hwasan_malloc(size, &stack);\ - if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ - return res - -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } -INTERCEPTOR_ATTRIBUTE -void *operator new(size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(true /*nothrow*/); -} -INTERCEPTOR_ATTRIBUTE -void *operator new[](size_t size, std::nothrow_t const&) { - OPERATOR_NEW_BODY(true /*nothrow*/); -} - -#define OPERATOR_DELETE_BODY \ - GET_MALLOC_STACK_TRACE; \ - if (ptr) hwasan_free(ptr, &stack) - -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } -INTERCEPTOR_ATTRIBUTE -void operator 
delete[](void *ptr, std::nothrow_t const&) { - OPERATOR_DELETE_BODY; -} - -#endif // HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE Index: compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_new_delete.cpp @@ -0,0 +1,66 @@ +//===-- hwasan_new_delete.cpp ---------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// Interceptors for operators new and delete. +//===----------------------------------------------------------------------===// + +#include "hwasan.h" +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_allocator.h" +#include "sanitizer_common/sanitizer_allocator_report.h" + +#if HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE + +#include + +using namespace __hwasan; // NOLINT + +// Fake std::nothrow_t to avoid including . +namespace std { + struct nothrow_t {}; +} // namespace std + + +// TODO(alekseys): throw std::bad_alloc instead of dying on OOM. 
+#define OPERATOR_NEW_BODY(nothrow) \ + GET_MALLOC_STACK_TRACE; \ + void *res = hwasan_malloc(size, &stack);\ + if (!nothrow && UNLIKELY(!res)) ReportOutOfMemory(size, &stack);\ + return res + +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size) { OPERATOR_NEW_BODY(false /*nothrow*/); } +INTERCEPTOR_ATTRIBUTE +void *operator new(size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(true /*nothrow*/); +} +INTERCEPTOR_ATTRIBUTE +void *operator new[](size_t size, std::nothrow_t const&) { + OPERATOR_NEW_BODY(true /*nothrow*/); +} + +#define OPERATOR_DELETE_BODY \ + GET_MALLOC_STACK_TRACE; \ + if (ptr) hwasan_free(ptr, &stack) + +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr) NOEXCEPT { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete(void *ptr, std::nothrow_t const&) { OPERATOR_DELETE_BODY; } +INTERCEPTOR_ATTRIBUTE +void operator delete[](void *ptr, std::nothrow_t const&) { + OPERATOR_DELETE_BODY; +} + +#endif // HWASAN_REPLACE_OPERATORS_NEW_AND_DELETE Index: compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cc @@ -1,52 +0,0 @@ -//===-- hwasan_poisoning.cc -------------------------------------*- C++ -*-===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. 
-// -//===----------------------------------------------------------------------===// - -#include "hwasan_poisoning.h" - -#include "hwasan_mapping.h" -#include "interception/interception.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_linux.h" - -namespace __hwasan { - -uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) { - CHECK(IsAligned(p, kShadowAlignment)); - CHECK(IsAligned(size, kShadowAlignment)); - uptr shadow_start = MemToShadow(p); - uptr shadow_size = MemToShadowSize(size); - - uptr page_size = GetPageSizeCached(); - uptr page_start = RoundUpTo(shadow_start, page_size); - uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size); - uptr threshold = common_flags()->clear_shadow_mmap_threshold; - if (SANITIZER_LINUX && - UNLIKELY(page_end >= page_start + threshold && tag == 0)) { - internal_memset((void *)shadow_start, tag, page_start - shadow_start); - internal_memset((void *)page_end, tag, - shadow_start + shadow_size - page_end); - // For an anonymous private mapping MADV_DONTNEED will return a zero page on - // Linux. - ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end); - } else { - internal_memset((void *)shadow_start, tag, shadow_size); - } - return AddTagToPointer(p, tag); -} - -uptr TagMemory(uptr p, uptr size, tag_t tag) { - uptr start = RoundDownTo(p, kShadowAlignment); - uptr end = RoundUpTo(p + size, kShadowAlignment); - return TagMemoryAligned(start, end - start, tag); -} - -} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_poisoning.cpp @@ -0,0 +1,52 @@ +//===-- hwasan_poisoning.cpp ------------------------------------*- C++ -*-===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. 
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +//===----------------------------------------------------------------------===// + +#include "hwasan_poisoning.h" + +#include "hwasan_mapping.h" +#include "interception/interception.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_linux.h" + +namespace __hwasan { + +uptr TagMemoryAligned(uptr p, uptr size, tag_t tag) { + CHECK(IsAligned(p, kShadowAlignment)); + CHECK(IsAligned(size, kShadowAlignment)); + uptr shadow_start = MemToShadow(p); + uptr shadow_size = MemToShadowSize(size); + + uptr page_size = GetPageSizeCached(); + uptr page_start = RoundUpTo(shadow_start, page_size); + uptr page_end = RoundDownTo(shadow_start + shadow_size, page_size); + uptr threshold = common_flags()->clear_shadow_mmap_threshold; + if (SANITIZER_LINUX && + UNLIKELY(page_end >= page_start + threshold && tag == 0)) { + internal_memset((void *)shadow_start, tag, page_start - shadow_start); + internal_memset((void *)page_end, tag, + shadow_start + shadow_size - page_end); + // For an anonymous private mapping MADV_DONTNEED will return a zero page on + // Linux. 
+ ReleaseMemoryPagesToOSAndZeroFill(page_start, page_end); + } else { + internal_memset((void *)shadow_start, tag, shadow_size); + } + return AddTagToPointer(p, tag); +} + +uptr TagMemory(uptr p, uptr size, tag_t tag) { + uptr start = RoundDownTo(p, kShadowAlignment); + uptr end = RoundUpTo(p + size, kShadowAlignment); + return TagMemoryAligned(start, end - start, tag); +} + +} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_report.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_report.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_report.cc @@ -1,436 +0,0 @@ -//===-- hwasan_report.cc --------------------------------------------------===// -// -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception -// -//===----------------------------------------------------------------------===// -// -// This file is a part of HWAddressSanitizer. -// -// Error reporting. -//===----------------------------------------------------------------------===// - -#include "hwasan.h" -#include "hwasan_allocator.h" -#include "hwasan_mapping.h" -#include "hwasan_thread.h" -#include "hwasan_thread_list.h" -#include "sanitizer_common/sanitizer_allocator_internal.h" -#include "sanitizer_common/sanitizer_common.h" -#include "sanitizer_common/sanitizer_flags.h" -#include "sanitizer_common/sanitizer_mutex.h" -#include "sanitizer_common/sanitizer_report_decorator.h" -#include "sanitizer_common/sanitizer_stackdepot.h" -#include "sanitizer_common/sanitizer_stacktrace_printer.h" -#include "sanitizer_common/sanitizer_symbolizer.h" - -using namespace __sanitizer; - -namespace __hwasan { - -class ScopedReport { - public: - ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) { - BlockingMutexLock lock(&error_message_lock_); - error_message_ptr_ = fatal ? 
&error_message_ : nullptr; - ++hwasan_report_count; - } - - ~ScopedReport() { - { - BlockingMutexLock lock(&error_message_lock_); - if (fatal) - SetAbortMessage(error_message_.data()); - error_message_ptr_ = nullptr; - } - if (common_flags()->print_module_map >= 2 || - (fatal && common_flags()->print_module_map)) - DumpProcessMap(); - if (fatal) - Die(); - } - - static void MaybeAppendToErrorMessage(const char *msg) { - BlockingMutexLock lock(&error_message_lock_); - if (!error_message_ptr_) - return; - uptr len = internal_strlen(msg); - uptr old_size = error_message_ptr_->size(); - error_message_ptr_->resize(old_size + len); - // overwrite old trailing '\0', keep new trailing '\0' untouched. - internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len); - } - private: - ScopedErrorReportLock error_report_lock_; - InternalMmapVector error_message_; - bool fatal; - - static InternalMmapVector *error_message_ptr_; - static BlockingMutex error_message_lock_; -}; - -InternalMmapVector *ScopedReport::error_message_ptr_; -BlockingMutex ScopedReport::error_message_lock_; - -// If there is an active ScopedReport, append to its error message. -void AppendToErrorMessageBuffer(const char *buffer) { - ScopedReport::MaybeAppendToErrorMessage(buffer); -} - -static StackTrace GetStackTraceFromId(u32 id) { - CHECK(id); - StackTrace res = StackDepotGet(id); - CHECK(res.trace); - return res; -} - -// A RAII object that holds a copy of the current thread stack ring buffer. -// The actual stack buffer may change while we are iterating over it (for -// example, Printf may call syslog() which can itself be built with hwasan). 
-class SavedStackAllocations { - public: - SavedStackAllocations(StackAllocationsRingBuffer *rb) { - uptr size = rb->size() * sizeof(uptr); - void *storage = - MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations"); - new (&rb_) StackAllocationsRingBuffer(*rb, storage); - } - - ~SavedStackAllocations() { - StackAllocationsRingBuffer *rb = get(); - UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr)); - } - - StackAllocationsRingBuffer *get() { - return (StackAllocationsRingBuffer *)&rb_; - } - - private: - uptr rb_; -}; - -class Decorator: public __sanitizer::SanitizerCommonDecorator { - public: - Decorator() : SanitizerCommonDecorator() { } - const char *Access() { return Blue(); } - const char *Allocation() const { return Magenta(); } - const char *Origin() const { return Magenta(); } - const char *Name() const { return Green(); } - const char *Location() { return Green(); } - const char *Thread() { return Green(); } -}; - -// Returns the index of the rb element that matches tagged_addr (plus one), -// or zero if found nothing. -uptr FindHeapAllocation(HeapAllocationsRingBuffer *rb, - uptr tagged_addr, - HeapAllocationRecord *har) { - if (!rb) return 0; - for (uptr i = 0, size = rb->size(); i < size; i++) { - auto h = (*rb)[i]; - if (h.tagged_addr <= tagged_addr && - h.tagged_addr + h.requested_size > tagged_addr) { - *har = h; - return i + 1; - } - } - return 0; -} - -void PrintAddressDescription( - uptr tagged_addr, uptr access_size, - StackAllocationsRingBuffer *current_stack_allocations) { - Decorator d; - int num_descriptions_printed = 0; - uptr untagged_addr = UntagAddr(tagged_addr); - - // Print some very basic information about the address, if it's a heap. - HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); - if (uptr beg = chunk.Beg()) { - uptr size = chunk.ActualSize(); - Printf("%s[%p,%p) is a %s %s heap chunk; " - "size: %zd offset: %zd\n%s", - d.Location(), - beg, beg + size, - chunk.FromSmallHeap() ? 
"small" : "large", - chunk.IsAllocated() ? "allocated" : "unallocated", - size, untagged_addr - beg, - d.Default()); - } - - // Check if this looks like a heap buffer overflow by scanning - // the shadow left and right and looking for the first adjacent - // object with a different memory tag. If that tag matches addr_tag, - // check the allocator if it has a live chunk there. - tag_t addr_tag = GetTagFromPointer(tagged_addr); - tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); - if (*tag_ptr != addr_tag) { // should be true usually. - tag_t *left = tag_ptr, *right = tag_ptr; - // scan left. - for (int i = 0; i < 1000 && *left == *tag_ptr; i++, left--){} - // scan right. - for (int i = 0; i < 1000 && *right == *tag_ptr; i++, right++){} - // Chose the object that has addr_tag and that is closer to addr. - tag_t *candidate = nullptr; - if (*right == addr_tag && *left == addr_tag) - candidate = right - tag_ptr < tag_ptr - left ? right : left; - else if (*right == addr_tag) - candidate = right; - else if (*left == addr_tag) - candidate = left; - - if (candidate) { - uptr mem = ShadowToMem(reinterpret_cast(candidate)); - HwasanChunkView chunk = FindHeapChunkByAddress(mem); - if (chunk.IsAllocated()) { - Printf("%s", d.Location()); - Printf( - "%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n", - untagged_addr, - candidate == left ? untagged_addr - chunk.End() - : chunk.Beg() - untagged_addr, - candidate == right ? "left" : "right", chunk.UsedSize(), - chunk.Beg(), chunk.End()); - Printf("%s", d.Allocation()); - Printf("allocated here:\n"); - Printf("%s", d.Default()); - GetStackTraceFromId(chunk.GetAllocStackId()).Print(); - num_descriptions_printed++; - } - } - } - - hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { - // Scan all threads' ring buffers to find if it's a heap-use-after-free. 
- HeapAllocationRecord har; - if (uptr D = FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) { - Printf("%s", d.Location()); - Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n", - untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), - har.requested_size, UntagAddr(har.tagged_addr), - UntagAddr(har.tagged_addr) + har.requested_size); - Printf("%s", d.Allocation()); - Printf("freed by thread T%zd here:\n", t->unique_id()); - Printf("%s", d.Default()); - GetStackTraceFromId(har.free_context_id).Print(); - - Printf("%s", d.Allocation()); - Printf("previously allocated here:\n", t); - Printf("%s", d.Default()); - GetStackTraceFromId(har.alloc_context_id).Print(); - - // Print a developer note: the index of this heap object - // in the thread's deallocation ring buffer. - Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", D, - flags()->heap_history_size); - - t->Announce(); - num_descriptions_printed++; - } - - // Very basic check for stack memory. - if (t->AddrIsInStack(untagged_addr)) { - Printf("%s", d.Location()); - Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, - t->unique_id()); - Printf("%s", d.Default()); - t->Announce(); - - // Temporary report section, needs to be improved. - Printf("Previously allocated frames:\n"); - auto *sa = (t == GetCurrentThread() && current_stack_allocations) - ? 
current_stack_allocations - : t->stack_allocations(); - uptr frames = Min((uptr)flags()->stack_history_size, sa->size()); - InternalScopedString frame_desc(GetPageSizeCached() * 2); - for (uptr i = 0; i < frames; i++) { - uptr record = (*sa)[i]; - if (!record) - break; - uptr sp = (record >> 48) << 4; - uptr pc_mask = (1ULL << 48) - 1; - uptr pc = record & pc_mask; - if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) { - frame_desc.append(" sp: 0x%zx ", sp); - RenderFrame(&frame_desc, "#%n %p %F %L\n", 0, frame->info, - common_flags()->symbolize_vs_style, - common_flags()->strip_path_prefix); - frame->ClearAll(); - if (auto Descr = GetStackFrameDescr(pc)) - frame_desc.append(" %s\n", Descr); - } - Printf("%s", frame_desc.data()); - frame_desc.clear(); - } - - num_descriptions_printed++; - } - }); - - // Print the remaining threads, as an extra information, 1 line per thread. - hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); }); - - if (!num_descriptions_printed) - // We exhausted our possibilities. Bail out. - Printf("HWAddressSanitizer can not describe address in more detail.\n"); -} - -void ReportStats() {} - -static void PrintTagsAroundAddr(tag_t *tag_ptr) { - Printf( - "Memory tags around the buggy address (one tag corresponds to %zd " - "bytes):\n", kShadowAlignment); - - const uptr row_len = 16; // better be power of two. - const uptr num_rows = 17; - tag_t *center_row_beg = reinterpret_cast( - RoundDownTo(reinterpret_cast(tag_ptr), row_len)); - tag_t *beg_row = center_row_beg - row_len * (num_rows / 2); - tag_t *end_row = center_row_beg + row_len * (num_rows / 2); - InternalScopedString s(GetPageSizeCached() * 8); - for (tag_t *row = beg_row; row < end_row; row += row_len) { - s.append("%s", row == center_row_beg ? "=>" : " "); - for (uptr i = 0; i < row_len; i++) { - s.append("%s", row + i == tag_ptr ? "[" : " "); - s.append("%02x", row[i]); - s.append("%s", row + i == tag_ptr ? 
"]" : " "); - } - s.append("%s\n", row == center_row_beg ? "<=" : " "); - } - Printf("%s", s.data()); -} - -void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) { - ScopedReport R(flags()->halt_on_error); - - uptr untagged_addr = UntagAddr(tagged_addr); - tag_t ptr_tag = GetTagFromPointer(tagged_addr); - tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); - tag_t mem_tag = *tag_ptr; - Decorator d; - Printf("%s", d.Error()); - uptr pc = stack->size ? stack->trace[0] : 0; - const char *bug_type = "invalid-free"; - Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, - untagged_addr, pc); - Printf("%s", d.Access()); - Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag); - Printf("%s", d.Default()); - - stack->Print(); - - PrintAddressDescription(tagged_addr, 0, nullptr); - - PrintTagsAroundAddr(tag_ptr); - - ReportErrorSummary(bug_type, stack); -} - -void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size, - uptr tail_size, const u8 *expected) { - ScopedReport R(flags()->halt_on_error); - Decorator d; - uptr untagged_addr = UntagAddr(tagged_addr); - Printf("%s", d.Error()); - const char *bug_type = "alocation-tail-overwritten"; - Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName, - bug_type, untagged_addr, untagged_addr + orig_size, orig_size); - Printf("\n%s", d.Default()); - stack->Print(); - HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); - if (chunk.Beg()) { - Printf("%s", d.Allocation()); - Printf("allocated here:\n"); - Printf("%s", d.Default()); - GetStackTraceFromId(chunk.GetAllocStackId()).Print(); - } - - InternalScopedString s(GetPageSizeCached() * 8); - CHECK_GT(tail_size, 0U); - CHECK_LT(tail_size, kShadowAlignment); - u8 *tail = reinterpret_cast(untagged_addr + orig_size); - s.append("Tail contains: "); - for (uptr i = 0; i < kShadowAlignment - tail_size; i++) - s.append(".. 
"); - for (uptr i = 0; i < tail_size; i++) - s.append("%02x ", tail[i]); - s.append("\n"); - s.append("Expected: "); - for (uptr i = 0; i < kShadowAlignment - tail_size; i++) - s.append(".. "); - for (uptr i = 0; i < tail_size; i++) - s.append("%02x ", expected[i]); - s.append("\n"); - s.append(" "); - for (uptr i = 0; i < kShadowAlignment - tail_size; i++) - s.append(" "); - for (uptr i = 0; i < tail_size; i++) - s.append("%s ", expected[i] != tail[i] ? "^^" : " "); - - s.append("\nThis error occurs when a buffer overflow overwrites memory\n" - "to the right of a heap object, but within the %zd-byte granule, e.g.\n" - " char *x = new char[20];\n" - " x[25] = 42;\n" - "By default %s does not detect such bugs at the time of write,\n" - "but can detect them at the time of free/delete.\n" - "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0;\n" - "To enable checking at the time of access, set " - "HWASAN_OPTIONS=malloc_align_right to non-zero\n\n", - kShadowAlignment, SanitizerToolName); - Printf("%s", s.data()); - GetCurrentThread()->Announce(); - - tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); - PrintTagsAroundAddr(tag_ptr); - - ReportErrorSummary(bug_type, stack); -} - -void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, - bool is_store, bool fatal) { - ScopedReport R(fatal); - SavedStackAllocations current_stack_allocations( - GetCurrentThread()->stack_allocations()); - - Decorator d; - Printf("%s", d.Error()); - uptr untagged_addr = UntagAddr(tagged_addr); - // TODO: when possible, try to print heap-use-after-free, etc. - const char *bug_type = "tag-mismatch"; - uptr pc = stack->size ? 
stack->trace[0] : 0; - Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, - untagged_addr, pc); - - Thread *t = GetCurrentThread(); - - sptr offset = - __hwasan_test_shadow(reinterpret_cast(tagged_addr), access_size); - CHECK(offset >= 0 && offset < static_cast(access_size)); - tag_t ptr_tag = GetTagFromPointer(tagged_addr); - tag_t *tag_ptr = - reinterpret_cast(MemToShadow(untagged_addr + offset)); - tag_t mem_tag = *tag_ptr; - - Printf("%s", d.Access()); - Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", - is_store ? "WRITE" : "READ", access_size, untagged_addr, ptr_tag, - mem_tag, t->unique_id()); - if (offset != 0) - Printf("Invalid access starting at offset [%zu, %zu)\n", offset, - Min(access_size, static_cast(offset) + (1 << kShadowScale))); - Printf("%s", d.Default()); - - stack->Print(); - - PrintAddressDescription(tagged_addr, access_size, - current_stack_allocations.get()); - t->Announce(); - - PrintTagsAroundAddr(tag_ptr); - - ReportErrorSummary(bug_type, stack); -} - -} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_report.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_report.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_report.cpp @@ -0,0 +1,436 @@ +//===-- hwasan_report.cpp -------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// +// This file is a part of HWAddressSanitizer. +// +// Error reporting. 
+//===----------------------------------------------------------------------===// + +#include "hwasan.h" +#include "hwasan_allocator.h" +#include "hwasan_mapping.h" +#include "hwasan_thread.h" +#include "hwasan_thread_list.h" +#include "sanitizer_common/sanitizer_allocator_internal.h" +#include "sanitizer_common/sanitizer_common.h" +#include "sanitizer_common/sanitizer_flags.h" +#include "sanitizer_common/sanitizer_mutex.h" +#include "sanitizer_common/sanitizer_report_decorator.h" +#include "sanitizer_common/sanitizer_stackdepot.h" +#include "sanitizer_common/sanitizer_stacktrace_printer.h" +#include "sanitizer_common/sanitizer_symbolizer.h" + +using namespace __sanitizer; + +namespace __hwasan { + +class ScopedReport { + public: + ScopedReport(bool fatal = false) : error_message_(1), fatal(fatal) { + BlockingMutexLock lock(&error_message_lock_); + error_message_ptr_ = fatal ? &error_message_ : nullptr; + ++hwasan_report_count; + } + + ~ScopedReport() { + { + BlockingMutexLock lock(&error_message_lock_); + if (fatal) + SetAbortMessage(error_message_.data()); + error_message_ptr_ = nullptr; + } + if (common_flags()->print_module_map >= 2 || + (fatal && common_flags()->print_module_map)) + DumpProcessMap(); + if (fatal) + Die(); + } + + static void MaybeAppendToErrorMessage(const char *msg) { + BlockingMutexLock lock(&error_message_lock_); + if (!error_message_ptr_) + return; + uptr len = internal_strlen(msg); + uptr old_size = error_message_ptr_->size(); + error_message_ptr_->resize(old_size + len); + // overwrite old trailing '\0', keep new trailing '\0' untouched. 
+ internal_memcpy(&(*error_message_ptr_)[old_size - 1], msg, len); + } + private: + ScopedErrorReportLock error_report_lock_; + InternalMmapVector error_message_; + bool fatal; + + static InternalMmapVector *error_message_ptr_; + static BlockingMutex error_message_lock_; +}; + +InternalMmapVector *ScopedReport::error_message_ptr_; +BlockingMutex ScopedReport::error_message_lock_; + +// If there is an active ScopedReport, append to its error message. +void AppendToErrorMessageBuffer(const char *buffer) { + ScopedReport::MaybeAppendToErrorMessage(buffer); +} + +static StackTrace GetStackTraceFromId(u32 id) { + CHECK(id); + StackTrace res = StackDepotGet(id); + CHECK(res.trace); + return res; +} + +// A RAII object that holds a copy of the current thread stack ring buffer. +// The actual stack buffer may change while we are iterating over it (for +// example, Printf may call syslog() which can itself be built with hwasan). +class SavedStackAllocations { + public: + SavedStackAllocations(StackAllocationsRingBuffer *rb) { + uptr size = rb->size() * sizeof(uptr); + void *storage = + MmapAlignedOrDieOnFatalError(size, size * 2, "saved stack allocations"); + new (&rb_) StackAllocationsRingBuffer(*rb, storage); + } + + ~SavedStackAllocations() { + StackAllocationsRingBuffer *rb = get(); + UnmapOrDie(rb->StartOfStorage(), rb->size() * sizeof(uptr)); + } + + StackAllocationsRingBuffer *get() { + return (StackAllocationsRingBuffer *)&rb_; + } + + private: + uptr rb_; +}; + +class Decorator: public __sanitizer::SanitizerCommonDecorator { + public: + Decorator() : SanitizerCommonDecorator() { } + const char *Access() { return Blue(); } + const char *Allocation() const { return Magenta(); } + const char *Origin() const { return Magenta(); } + const char *Name() const { return Green(); } + const char *Location() { return Green(); } + const char *Thread() { return Green(); } +}; + +// Returns the index of the rb element that matches tagged_addr (plus one), +// or zero if found 
nothing. +uptr FindHeapAllocation(HeapAllocationsRingBuffer *rb, + uptr tagged_addr, + HeapAllocationRecord *har) { + if (!rb) return 0; + for (uptr i = 0, size = rb->size(); i < size; i++) { + auto h = (*rb)[i]; + if (h.tagged_addr <= tagged_addr && + h.tagged_addr + h.requested_size > tagged_addr) { + *har = h; + return i + 1; + } + } + return 0; +} + +void PrintAddressDescription( + uptr tagged_addr, uptr access_size, + StackAllocationsRingBuffer *current_stack_allocations) { + Decorator d; + int num_descriptions_printed = 0; + uptr untagged_addr = UntagAddr(tagged_addr); + + // Print some very basic information about the address, if it's a heap. + HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); + if (uptr beg = chunk.Beg()) { + uptr size = chunk.ActualSize(); + Printf("%s[%p,%p) is a %s %s heap chunk; " + "size: %zd offset: %zd\n%s", + d.Location(), + beg, beg + size, + chunk.FromSmallHeap() ? "small" : "large", + chunk.IsAllocated() ? "allocated" : "unallocated", + size, untagged_addr - beg, + d.Default()); + } + + // Check if this looks like a heap buffer overflow by scanning + // the shadow left and right and looking for the first adjacent + // object with a different memory tag. If that tag matches addr_tag, + // check the allocator if it has a live chunk there. + tag_t addr_tag = GetTagFromPointer(tagged_addr); + tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); + if (*tag_ptr != addr_tag) { // should be true usually. + tag_t *left = tag_ptr, *right = tag_ptr; + // scan left. + for (int i = 0; i < 1000 && *left == *tag_ptr; i++, left--){} + // scan right. + for (int i = 0; i < 1000 && *right == *tag_ptr; i++, right++){} + // Chose the object that has addr_tag and that is closer to addr. + tag_t *candidate = nullptr; + if (*right == addr_tag && *left == addr_tag) + candidate = right - tag_ptr < tag_ptr - left ? 
right : left; + else if (*right == addr_tag) + candidate = right; + else if (*left == addr_tag) + candidate = left; + + if (candidate) { + uptr mem = ShadowToMem(reinterpret_cast(candidate)); + HwasanChunkView chunk = FindHeapChunkByAddress(mem); + if (chunk.IsAllocated()) { + Printf("%s", d.Location()); + Printf( + "%p is located %zd bytes to the %s of %zd-byte region [%p,%p)\n", + untagged_addr, + candidate == left ? untagged_addr - chunk.End() + : chunk.Beg() - untagged_addr, + candidate == right ? "left" : "right", chunk.UsedSize(), + chunk.Beg(), chunk.End()); + Printf("%s", d.Allocation()); + Printf("allocated here:\n"); + Printf("%s", d.Default()); + GetStackTraceFromId(chunk.GetAllocStackId()).Print(); + num_descriptions_printed++; + } + } + } + + hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { + // Scan all threads' ring buffers to find if it's a heap-use-after-free. + HeapAllocationRecord har; + if (uptr D = FindHeapAllocation(t->heap_allocations(), tagged_addr, &har)) { + Printf("%s", d.Location()); + Printf("%p is located %zd bytes inside of %zd-byte region [%p,%p)\n", + untagged_addr, untagged_addr - UntagAddr(har.tagged_addr), + har.requested_size, UntagAddr(har.tagged_addr), + UntagAddr(har.tagged_addr) + har.requested_size); + Printf("%s", d.Allocation()); + Printf("freed by thread T%zd here:\n", t->unique_id()); + Printf("%s", d.Default()); + GetStackTraceFromId(har.free_context_id).Print(); + + Printf("%s", d.Allocation()); + Printf("previously allocated here:\n", t); + Printf("%s", d.Default()); + GetStackTraceFromId(har.alloc_context_id).Print(); + + // Print a developer note: the index of this heap object + // in the thread's deallocation ring buffer. + Printf("hwasan_dev_note_heap_rb_distance: %zd %zd\n", D, + flags()->heap_history_size); + + t->Announce(); + num_descriptions_printed++; + } + + // Very basic check for stack memory. 
+ if (t->AddrIsInStack(untagged_addr)) { + Printf("%s", d.Location()); + Printf("Address %p is located in stack of thread T%zd\n", untagged_addr, + t->unique_id()); + Printf("%s", d.Default()); + t->Announce(); + + // Temporary report section, needs to be improved. + Printf("Previously allocated frames:\n"); + auto *sa = (t == GetCurrentThread() && current_stack_allocations) + ? current_stack_allocations + : t->stack_allocations(); + uptr frames = Min((uptr)flags()->stack_history_size, sa->size()); + InternalScopedString frame_desc(GetPageSizeCached() * 2); + for (uptr i = 0; i < frames; i++) { + uptr record = (*sa)[i]; + if (!record) + break; + uptr sp = (record >> 48) << 4; + uptr pc_mask = (1ULL << 48) - 1; + uptr pc = record & pc_mask; + if (SymbolizedStack *frame = Symbolizer::GetOrInit()->SymbolizePC(pc)) { + frame_desc.append(" sp: 0x%zx ", sp); + RenderFrame(&frame_desc, "#%n %p %F %L\n", 0, frame->info, + common_flags()->symbolize_vs_style, + common_flags()->strip_path_prefix); + frame->ClearAll(); + if (auto Descr = GetStackFrameDescr(pc)) + frame_desc.append(" %s\n", Descr); + } + Printf("%s", frame_desc.data()); + frame_desc.clear(); + } + + num_descriptions_printed++; + } + }); + + // Print the remaining threads, as an extra information, 1 line per thread. + hwasanThreadList().VisitAllLiveThreads([&](Thread *t) { t->Announce(); }); + + if (!num_descriptions_printed) + // We exhausted our possibilities. Bail out. + Printf("HWAddressSanitizer can not describe address in more detail.\n"); +} + +void ReportStats() {} + +static void PrintTagsAroundAddr(tag_t *tag_ptr) { + Printf( + "Memory tags around the buggy address (one tag corresponds to %zd " + "bytes):\n", kShadowAlignment); + + const uptr row_len = 16; // better be power of two. 
+ const uptr num_rows = 17; + tag_t *center_row_beg = reinterpret_cast( + RoundDownTo(reinterpret_cast(tag_ptr), row_len)); + tag_t *beg_row = center_row_beg - row_len * (num_rows / 2); + tag_t *end_row = center_row_beg + row_len * (num_rows / 2); + InternalScopedString s(GetPageSizeCached() * 8); + for (tag_t *row = beg_row; row < end_row; row += row_len) { + s.append("%s", row == center_row_beg ? "=>" : " "); + for (uptr i = 0; i < row_len; i++) { + s.append("%s", row + i == tag_ptr ? "[" : " "); + s.append("%02x", row[i]); + s.append("%s", row + i == tag_ptr ? "]" : " "); + } + s.append("%s\n", row == center_row_beg ? "<=" : " "); + } + Printf("%s", s.data()); +} + +void ReportInvalidFree(StackTrace *stack, uptr tagged_addr) { + ScopedReport R(flags()->halt_on_error); + + uptr untagged_addr = UntagAddr(tagged_addr); + tag_t ptr_tag = GetTagFromPointer(tagged_addr); + tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); + tag_t mem_tag = *tag_ptr; + Decorator d; + Printf("%s", d.Error()); + uptr pc = stack->size ? 
stack->trace[0] : 0; + const char *bug_type = "invalid-free"; + Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, + untagged_addr, pc); + Printf("%s", d.Access()); + Printf("tags: %02x/%02x (ptr/mem)\n", ptr_tag, mem_tag); + Printf("%s", d.Default()); + + stack->Print(); + + PrintAddressDescription(tagged_addr, 0, nullptr); + + PrintTagsAroundAddr(tag_ptr); + + ReportErrorSummary(bug_type, stack); +} + +void ReportTailOverwritten(StackTrace *stack, uptr tagged_addr, uptr orig_size, + uptr tail_size, const u8 *expected) { + ScopedReport R(flags()->halt_on_error); + Decorator d; + uptr untagged_addr = UntagAddr(tagged_addr); + Printf("%s", d.Error()); + const char *bug_type = "alocation-tail-overwritten"; + Report("ERROR: %s: %s; heap object [%p,%p) of size %zd\n", SanitizerToolName, + bug_type, untagged_addr, untagged_addr + orig_size, orig_size); + Printf("\n%s", d.Default()); + stack->Print(); + HwasanChunkView chunk = FindHeapChunkByAddress(untagged_addr); + if (chunk.Beg()) { + Printf("%s", d.Allocation()); + Printf("allocated here:\n"); + Printf("%s", d.Default()); + GetStackTraceFromId(chunk.GetAllocStackId()).Print(); + } + + InternalScopedString s(GetPageSizeCached() * 8); + CHECK_GT(tail_size, 0U); + CHECK_LT(tail_size, kShadowAlignment); + u8 *tail = reinterpret_cast(untagged_addr + orig_size); + s.append("Tail contains: "); + for (uptr i = 0; i < kShadowAlignment - tail_size; i++) + s.append(".. "); + for (uptr i = 0; i < tail_size; i++) + s.append("%02x ", tail[i]); + s.append("\n"); + s.append("Expected: "); + for (uptr i = 0; i < kShadowAlignment - tail_size; i++) + s.append(".. "); + for (uptr i = 0; i < tail_size; i++) + s.append("%02x ", expected[i]); + s.append("\n"); + s.append(" "); + for (uptr i = 0; i < kShadowAlignment - tail_size; i++) + s.append(" "); + for (uptr i = 0; i < tail_size; i++) + s.append("%s ", expected[i] != tail[i] ? 
"^^" : " "); + + s.append("\nThis error occurs when a buffer overflow overwrites memory\n" + "to the right of a heap object, but within the %zd-byte granule, e.g.\n" + " char *x = new char[20];\n" + " x[25] = 42;\n" + "By default %s does not detect such bugs at the time of write,\n" + "but can detect them at the time of free/delete.\n" + "To disable this feature set HWASAN_OPTIONS=free_checks_tail_magic=0;\n" + "To enable checking at the time of access, set " + "HWASAN_OPTIONS=malloc_align_right to non-zero\n\n", + kShadowAlignment, SanitizerToolName); + Printf("%s", s.data()); + GetCurrentThread()->Announce(); + + tag_t *tag_ptr = reinterpret_cast(MemToShadow(untagged_addr)); + PrintTagsAroundAddr(tag_ptr); + + ReportErrorSummary(bug_type, stack); +} + +void ReportTagMismatch(StackTrace *stack, uptr tagged_addr, uptr access_size, + bool is_store, bool fatal) { + ScopedReport R(fatal); + SavedStackAllocations current_stack_allocations( + GetCurrentThread()->stack_allocations()); + + Decorator d; + Printf("%s", d.Error()); + uptr untagged_addr = UntagAddr(tagged_addr); + // TODO: when possible, try to print heap-use-after-free, etc. + const char *bug_type = "tag-mismatch"; + uptr pc = stack->size ? stack->trace[0] : 0; + Report("ERROR: %s: %s on address %p at pc %p\n", SanitizerToolName, bug_type, + untagged_addr, pc); + + Thread *t = GetCurrentThread(); + + sptr offset = + __hwasan_test_shadow(reinterpret_cast(tagged_addr), access_size); + CHECK(offset >= 0 && offset < static_cast(access_size)); + tag_t ptr_tag = GetTagFromPointer(tagged_addr); + tag_t *tag_ptr = + reinterpret_cast(MemToShadow(untagged_addr + offset)); + tag_t mem_tag = *tag_ptr; + + Printf("%s", d.Access()); + Printf("%s of size %zu at %p tags: %02x/%02x (ptr/mem) in thread T%zd\n", + is_store ? 
"WRITE" : "READ", access_size, untagged_addr, ptr_tag, + mem_tag, t->unique_id()); + if (offset != 0) + Printf("Invalid access starting at offset [%zu, %zu)\n", offset, + Min(access_size, static_cast(offset) + (1 << kShadowScale))); + Printf("%s", d.Default()); + + stack->Print(); + + PrintAddressDescription(tagged_addr, access_size, + current_stack_allocations.get()); + t->Announce(); + + PrintTagsAroundAddr(tag_ptr); + + ReportErrorSummary(bug_type, stack); +} + +} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_thread.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_thread.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_thread.cc @@ -1,122 +0,0 @@ - -#include "hwasan.h" -#include "hwasan_mapping.h" -#include "hwasan_thread.h" -#include "hwasan_poisoning.h" -#include "hwasan_interface_internal.h" - -#include "sanitizer_common/sanitizer_file.h" -#include "sanitizer_common/sanitizer_placement_new.h" -#include "sanitizer_common/sanitizer_tls_get_addr.h" - - -namespace __hwasan { - -static u32 RandomSeed() { - u32 seed; - do { - if (UNLIKELY(!GetRandom(reinterpret_cast(&seed), sizeof(seed), - /*blocking=*/false))) { - seed = static_cast( - (NanoTime() >> 12) ^ - (reinterpret_cast(__builtin_frame_address(0)) >> 4)); - } - } while (!seed); - return seed; -} - -void Thread::InitRandomState() { - random_state_ = flags()->random_tags ? RandomSeed() : unique_id_; -} - -void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) { - static u64 unique_id; - unique_id_ = unique_id++; - if (auto sz = flags()->heap_history_size) - heap_allocations_ = HeapAllocationsRingBuffer::New(sz); - - HwasanTSDThreadInit(); // Only needed with interceptors. - uptr *ThreadLong = GetCurrentThreadLongPtr(); - // The following implicitly sets (this) as the current thread. - stack_allocations_ = new (ThreadLong) - StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size); - // Check that it worked. 
- CHECK_EQ(GetCurrentThread(), this); - - // ScopedTaggingDisable needs GetCurrentThread to be set up. - ScopedTaggingDisabler disabler; - - uptr tls_size; - uptr stack_size; - GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_, - &tls_size); - stack_top_ = stack_bottom_ + stack_size; - tls_end_ = tls_begin_ + tls_size; - - if (stack_bottom_) { - int local; - CHECK(AddrIsInStack((uptr)&local)); - CHECK(MemIsApp(stack_bottom_)); - CHECK(MemIsApp(stack_top_ - 1)); - } - - if (flags()->verbose_threads) { - if (IsMainThread()) { - Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n", - sizeof(Thread), heap_allocations_->SizeInBytes(), - stack_allocations_->size() * sizeof(uptr)); - } - Print("Creating : "); - } -} - -void Thread::ClearShadowForThreadStackAndTLS() { - if (stack_top_ != stack_bottom_) - TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0); - if (tls_begin_ != tls_end_) - TagMemory(tls_begin_, tls_end_ - tls_begin_, 0); -} - -void Thread::Destroy() { - if (flags()->verbose_threads) - Print("Destroying: "); - AllocatorSwallowThreadLocalCache(allocator_cache()); - ClearShadowForThreadStackAndTLS(); - if (heap_allocations_) - heap_allocations_->Delete(); - DTLS_Destroy(); -} - -void Thread::Print(const char *Prefix) { - Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, - unique_id_, this, stack_bottom(), stack_top(), - stack_top() - stack_bottom(), - tls_begin(), tls_end()); -} - -static u32 xorshift(u32 state) { - state ^= state << 13; - state ^= state >> 17; - state ^= state << 5; - return state; -} - -// Generate a (pseudo-)random non-zero tag. 
-tag_t Thread::GenerateRandomTag() { - if (tagging_disabled_) return 0; - tag_t tag; - do { - if (flags()->random_tags) { - if (!random_buffer_) - random_buffer_ = random_state_ = xorshift(random_state_); - CHECK(random_buffer_); - tag = random_buffer_ & 0xFF; - random_buffer_ >>= 8; - } else { - tag = random_state_ = (random_state_ + 1) & 0xFF; - } - } while (!tag); - return tag; -} - -} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_thread.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_thread.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_thread.cpp @@ -0,0 +1,122 @@ + +#include "hwasan.h" +#include "hwasan_mapping.h" +#include "hwasan_thread.h" +#include "hwasan_poisoning.h" +#include "hwasan_interface_internal.h" + +#include "sanitizer_common/sanitizer_file.h" +#include "sanitizer_common/sanitizer_placement_new.h" +#include "sanitizer_common/sanitizer_tls_get_addr.h" + + +namespace __hwasan { + +static u32 RandomSeed() { + u32 seed; + do { + if (UNLIKELY(!GetRandom(reinterpret_cast(&seed), sizeof(seed), + /*blocking=*/false))) { + seed = static_cast( + (NanoTime() >> 12) ^ + (reinterpret_cast(__builtin_frame_address(0)) >> 4)); + } + } while (!seed); + return seed; +} + +void Thread::InitRandomState() { + random_state_ = flags()->random_tags ? RandomSeed() : unique_id_; +} + +void Thread::Init(uptr stack_buffer_start, uptr stack_buffer_size) { + static u64 unique_id; + unique_id_ = unique_id++; + if (auto sz = flags()->heap_history_size) + heap_allocations_ = HeapAllocationsRingBuffer::New(sz); + + HwasanTSDThreadInit(); // Only needed with interceptors. + uptr *ThreadLong = GetCurrentThreadLongPtr(); + // The following implicitly sets (this) as the current thread. + stack_allocations_ = new (ThreadLong) + StackAllocationsRingBuffer((void *)stack_buffer_start, stack_buffer_size); + // Check that it worked. 
+ CHECK_EQ(GetCurrentThread(), this); + + // ScopedTaggingDisable needs GetCurrentThread to be set up. + ScopedTaggingDisabler disabler; + + uptr tls_size; + uptr stack_size; + GetThreadStackAndTls(IsMainThread(), &stack_bottom_, &stack_size, &tls_begin_, + &tls_size); + stack_top_ = stack_bottom_ + stack_size; + tls_end_ = tls_begin_ + tls_size; + + if (stack_bottom_) { + int local; + CHECK(AddrIsInStack((uptr)&local)); + CHECK(MemIsApp(stack_bottom_)); + CHECK(MemIsApp(stack_top_ - 1)); + } + + if (flags()->verbose_threads) { + if (IsMainThread()) { + Printf("sizeof(Thread): %zd sizeof(HeapRB): %zd sizeof(StackRB): %zd\n", + sizeof(Thread), heap_allocations_->SizeInBytes(), + stack_allocations_->size() * sizeof(uptr)); + } + Print("Creating : "); + } +} + +void Thread::ClearShadowForThreadStackAndTLS() { + if (stack_top_ != stack_bottom_) + TagMemory(stack_bottom_, stack_top_ - stack_bottom_, 0); + if (tls_begin_ != tls_end_) + TagMemory(tls_begin_, tls_end_ - tls_begin_, 0); +} + +void Thread::Destroy() { + if (flags()->verbose_threads) + Print("Destroying: "); + AllocatorSwallowThreadLocalCache(allocator_cache()); + ClearShadowForThreadStackAndTLS(); + if (heap_allocations_) + heap_allocations_->Delete(); + DTLS_Destroy(); +} + +void Thread::Print(const char *Prefix) { + Printf("%sT%zd %p stack: [%p,%p) sz: %zd tls: [%p,%p)\n", Prefix, + unique_id_, this, stack_bottom(), stack_top(), + stack_top() - stack_bottom(), + tls_begin(), tls_end()); +} + +static u32 xorshift(u32 state) { + state ^= state << 13; + state ^= state >> 17; + state ^= state << 5; + return state; +} + +// Generate a (pseudo-)random non-zero tag. 
+tag_t Thread::GenerateRandomTag() { + if (tagging_disabled_) return 0; + tag_t tag; + do { + if (flags()->random_tags) { + if (!random_buffer_) + random_buffer_ = random_state_ = xorshift(random_state_); + CHECK(random_buffer_); + tag = random_buffer_ & 0xFF; + random_buffer_ >>= 8; + } else { + tag = random_state_ = (random_state_ + 1) & 0xFF; + } + } while (!tag); + return tag; +} + +} // namespace __hwasan Index: compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cc =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cc +++ compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cc @@ -1,15 +0,0 @@ -#include "hwasan_thread_list.h" - -namespace __hwasan { -static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)]; -static HwasanThreadList *hwasan_thread_list; - -HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; } - -void InitThreadList(uptr storage, uptr size) { - CHECK(hwasan_thread_list == nullptr); - hwasan_thread_list = - new (thread_list_placeholder) HwasanThreadList(storage, size); -} - -} // namespace Index: compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cpp =================================================================== --- compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cpp +++ compiler-rt/trunk/lib/hwasan/hwasan_thread_list.cpp @@ -0,0 +1,15 @@ +#include "hwasan_thread_list.h" + +namespace __hwasan { +static ALIGNED(16) char thread_list_placeholder[sizeof(HwasanThreadList)]; +static HwasanThreadList *hwasan_thread_list; + +HwasanThreadList &hwasanThreadList() { return *hwasan_thread_list; } + +void InitThreadList(uptr storage, uptr size) { + CHECK(hwasan_thread_list == nullptr); + hwasan_thread_list = + new (thread_list_placeholder) HwasanThreadList(storage, size); +} + +} // namespace