Index: lib/sanitizer_common/CMakeLists.txt =================================================================== --- lib/sanitizer_common/CMakeLists.txt +++ lib/sanitizer_common/CMakeLists.txt @@ -16,6 +16,8 @@ sanitizer_platform_limits_posix.cc sanitizer_posix.cc sanitizer_printf.cc + sanitizer_procmaps_common.cc + sanitizer_procmaps_freebsd.cc sanitizer_procmaps_linux.cc sanitizer_procmaps_mac.cc sanitizer_stackdepot.cc Index: lib/sanitizer_common/sanitizer_procmaps.h =================================================================== --- lib/sanitizer_common/sanitizer_procmaps.h +++ lib/sanitizer_common/sanitizer_procmaps.h @@ -86,6 +86,14 @@ // Returns code range for the specified module. bool GetCodeRangeForFile(const char *module, uptr *start, uptr *end); +// Reads process memory map in an OS-specific way. +void ReadProcMaps(ProcSelfMapsBuff *proc_maps); + +bool IsHex(char c); +uptr ReadHex(const char *p); +bool IsDecimal(char c); +uptr ReadDecimal(const char *p); + } // namespace __sanitizer #endif // SANITIZER_PROCMAPS_H Index: lib/sanitizer_common/sanitizer_procmaps_common.cc =================================================================== --- lib/sanitizer_common/sanitizer_procmaps_common.cc +++ lib/sanitizer_common/sanitizer_procmaps_common.cc @@ -0,0 +1,165 @@ +//===-- sanitizer_procmaps_common.cc --------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// Information about the process mappings (common parts). +//===----------------------------------------------------------------------===// + +#include "sanitizer_platform.h" +#if SANITIZER_FREEBSD || SANITIZER_LINUX +#include "sanitizer_common.h" +#include "sanitizer_placement_new.h" +#include "sanitizer_procmaps.h" + +namespace __sanitizer { + +// Linker initialized. 
+ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_; +StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized. + +bool IsHex(char c) { + return (c >= '0' && c <= '9') + || (c >= 'a' && c <= 'f'); +} + +uptr ReadHex(const char *p) { + uptr v = 0; + for (; IsHex(p[0]); p++) { + if (p[0] >= '0' && p[0] <= '9') + v = v * 16 + p[0] - '0'; + else + v = v * 16 + p[0] - 'a' + 10; + } + return v; +} + +bool IsDecimal(char c) { + return c >= '0' && c <= '9'; +} + +uptr ReadDecimal(const char *p) { + uptr v = 0; + for (; IsDecimal(p[0]); p++) + v = v * 10 + p[0] - '0'; + return v; +} + +MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { + ReadProcMaps(&proc_self_maps_); + if (cache_enabled) { + if (proc_self_maps_.mmaped_size == 0) { + LoadFromCache(); + CHECK_GT(proc_self_maps_.len, 0); + } + } else { + CHECK_GT(proc_self_maps_.mmaped_size, 0); + } + Reset(); + // FIXME: in the future we may want to cache the mappings on demand only. + if (cache_enabled) + CacheMemoryMappings(); +} + +MemoryMappingLayout::~MemoryMappingLayout() { + // Only unmap the buffer if it is different from the cached one. Otherwise + // it will be unmapped when the cache is refreshed. + if (proc_self_maps_.data != cached_proc_self_maps_.data) { + UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size); + } +} + +void MemoryMappingLayout::Reset() { + current_ = proc_self_maps_.data; +} + +// static +void MemoryMappingLayout::CacheMemoryMappings() { + SpinMutexLock l(&cache_lock_); + // Don't invalidate the cache if the mappings are unavailable. 
+  ProcSelfMapsBuff old_proc_self_maps;
+  old_proc_self_maps = cached_proc_self_maps_;
+  ReadProcMaps(&cached_proc_self_maps_);
+  if (cached_proc_self_maps_.mmaped_size == 0) {
+    cached_proc_self_maps_ = old_proc_self_maps;
+  } else {
+    if (old_proc_self_maps.mmaped_size) {
+      UnmapOrDie(old_proc_self_maps.data,
+                 old_proc_self_maps.mmaped_size);
+    }
+  }
+}
+
+void MemoryMappingLayout::LoadFromCache() {
+  SpinMutexLock l(&cache_lock_);
+  if (cached_proc_self_maps_.data) {
+    proc_self_maps_ = cached_proc_self_maps_;
+  }
+}
+
+uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
+                                            uptr max_modules,
+                                            string_predicate_t filter) {
+  Reset();
+  uptr cur_beg, cur_end, cur_offset;
+  InternalScopedBuffer<char> module_name(kMaxPathLength);
+  uptr n_modules = 0;
+  for (uptr i = 0; n_modules < max_modules &&
+                       Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
+                            module_name.size(), 0);
+       i++) {
+    const char *cur_name = module_name.data();
+    if (cur_name[0] == '\0')
+      continue;
+    if (filter && !filter(cur_name))
+      continue;
+    void *mem = &modules[n_modules];
+    // Don't subtract 'cur_beg' from the first entry:
+    // * If a binary is compiled w/o -pie, then the first entry in
+    //   process maps is likely the binary itself (all dynamic libs
+    //   are mapped higher in address space). For such a binary,
+    //   instruction offset in binary coincides with the actual
+    //   instruction address in virtual memory (as code section
+    //   is mapped to a fixed memory range).
+    // * If a binary is compiled with -pie, all the modules are
+    //   mapped high at address space (in particular, higher than
+    //   shadow memory of the tool), so the module can't be the
+    //   first entry.
+    uptr base_address = (i ? cur_beg : 0) - cur_offset;
+    LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
+    cur_module->addAddressRange(cur_beg, cur_end);
+    n_modules++;
+  }
+  return n_modules;
+}
+
+void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
+  char *smaps = 0;
+  uptr smaps_cap = 0;
+  uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
+                                    &smaps, &smaps_cap, 64<<20);
+  uptr start = 0;
+  bool file = false;
+  const char *pos = smaps;
+  while (pos < smaps + smaps_len) {
+    if (IsHex(pos[0])) {
+      start = ReadHex(pos);
+      for (; *pos != '/' && *pos > '\n'; pos++) {}
+      file = *pos == '/';
+    } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
+      for (; *pos < '0' || *pos > '9'; pos++) {}
+      uptr rss = ReadDecimal(pos) * 1024;
+      cb(start, rss, file, stats, stats_size);
+    }
+    while (*pos++ != '\n') {}
+  }
+  UnmapOrDie(smaps, smaps_cap);
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
Index: lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
===================================================================
--- lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
+++ lib/sanitizer_common/sanitizer_procmaps_freebsd.cc
@@ -0,0 +1,258 @@
+//===-- sanitizer_procmaps_freebsd.cc -------------------------------------===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+//
+// Information about the process mappings (FreeBSD-specific parts).
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_platform.h"
+#if SANITIZER_FREEBSD
+#include "sanitizer_common.h"
+#include "sanitizer_procmaps.h"
+
+#include <unistd.h>
+#include <sys/sysctl.h>
+#include <sys/user.h>
+
+// On x86-64 FreeBSD prior to v9.2 and v10.0 sysctl(KERN_PROC_VMMAP) takes
+// too long to perform so we have to use a workaround that reads '/dev/kmem'
+// directly.
+#define SANITIZER_USE_FREEBSD_MMAP_WORKAROUND (SANITIZER_WORDSIZE == 64)
+
+#if SANITIZER_USE_FREEBSD_MMAP_WORKAROUND
+#define _KVM_VNODE // To declare 'struct vnode'.
+#include <sys/vnode.h>
+#include <vm/vm_map.h>
+#endif
+
+namespace __sanitizer {
+
+#if SANITIZER_USE_FREEBSD_MMAP_WORKAROUND
+static void HandleKmemFileError(const char *func) {
+  Report("ERROR: %s('/dev/kmem') failed, errno %d\n", func, errno);
+  Die();
+}
+
+static uptr OpenKmemFile() {
+  uptr Fd = OpenFile("/dev/kmem", /* write= */ false);
+  if (internal_iserror(Fd))
+    HandleKmemFileError("open");
+  return Fd;
+}
+
+static void CloseKmemFile(int fd) {
+  if (internal_close(fd) != 0)
+    HandleKmemFileError("close");
+}
+
+static void ReadKmemFile(void *dest, int fd, const void *addr, size_t size) {
+  errno = 0;
+  lseek(fd, (off_t) addr, SEEK_SET);
+  // Since 'p' may have its high-order bit raised, the returning value may be
+  // negative; so, we check 'errno' instead of considering returning value.
+  if (errno != 0)
+    HandleKmemFileError("seek");
+
+  if (read(fd, dest, size) != (ssize_t) size)
+    HandleKmemFileError("read");
+}
+
+// From /usr/src/sys/kern/vfs_cache.c .
+struct namecache { + LIST_ENTRY(namecache) nc_hash; // hash chain + LIST_ENTRY(namecache) nc_src; // source vnode list + TAILQ_ENTRY(namecache) nc_dst; // destination vnode list + struct vnode *nc_dvp; // vnode of parent of name + struct vnode *nc_vp; // vnode the name refers to + u_char nc_flag; // flag bits + u_char nc_nlen; // length of name + char nc_name[1]; // segment name + NUL +}; + +static void ReadVmEntryFilePath(char *path, size_t path_size, + const struct vm_map_entry *entry) { + CHECK_NE(path, NULL); + CHECK_GT(path_size, 0); + + internal_memset((void*)path, '\0', path_size); + + // Path buffer size except the terminating NUL character. + const size_t PathRoom = path_size - 1; + if (PathRoom == 0) + return; + + const struct vm_object *OP = entry->object.vm_object; + if (OP == NULL) + return; + + uptr Fd = OpenKmemFile(); + + struct vm_object Object; + while (OP != NULL) { + ReadKmemFile(&Object, Fd, OP, sizeof(Object)); + OP = Object.backing_object; + } + + if (Object.type != OBJT_VNODE) + return; + + struct vnode *VP = (struct vnode*)Object.handle; + struct vnode Vnode; + struct namecache Namecache; + + while (VP != NULL) { + ReadKmemFile(&Vnode, Fd, VP, sizeof(Vnode)); + + struct namecache *Data = (struct namecache*)TAILQ_FIRST(&Vnode.v_cache_dst); + if (Data == NULL) + break; + + ReadKmemFile(&Namecache, Fd, Data, sizeof(Namecache)); + + size_t Len = Namecache.nc_nlen; + if (Len > 0) { + Len = Min(Len, PathRoom) + 1; // '+ 1' is for leading '/'. + if (Len < PathRoom) + internal_memmove((void*)(path + Len), path, PathRoom - Len); + + path[0] = '/'; + ReadKmemFile((void*)(path + 1), Fd, &Data->nc_name, Len - 1); + } + + VP = Namecache.nc_dvp; + } + + CloseKmemFile(Fd); +} +#endif + +void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { +#if SANITIZER_USE_FREEBSD_MMAP_WORKAROUND + // Get process VM map. 
+  const pid_t Pid = getpid();
+  const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_PID, Pid };
+  struct kinfo_proc Proc;
+  size_t ProcSize = sizeof(struct kinfo_proc);
+  int Err = sysctl(Mib, 4, &Proc, &ProcSize, NULL, 0);
+  CHECK_EQ(Err, 0);
+  CHECK_EQ(ProcSize, sizeof(struct kinfo_proc));
+
+  // Count VM entries.
+  uptr Fd = OpenKmemFile();
+  const struct vm_map *KmemMap = &Proc.ki_vmspace->vm_map;
+  const struct vm_map_entry *Header = &KmemMap->header;
+  const struct vm_map_entry *EP;
+  struct vm_map_entry E;
+
+  size_t Count = 0;
+  EP = Header;
+  do {
+    ReadKmemFile(&E, Fd, EP, sizeof(E));
+    // Skip header entry that correspond to the whole range of available memory.
+    if (EP != Header)
+      Count++;
+  } while ((EP = E.next) != Header);
+  size_t Size = Count * sizeof(struct vm_map_entry);
+
+  // Load VM entries.
+  size_t AllocatedCount = Count + 3;
+  size_t AllocatedSize = AllocatedCount * sizeof(struct vm_map_entry);
+  struct vm_map_entry *VmMap =
+      (struct vm_map_entry*)MmapOrDie(AllocatedSize, "ReadProcMaps()");
+  size_t I = 0;
+  EP = Header;
+  do {
+    ReadKmemFile(&E, Fd, EP, sizeof(E));
+    CHECK_LT(I, AllocatedCount);
+    if (EP != Header)
+      VmMap[I++] = E;
+  } while ((EP = E.next) != Header);
+  CHECK_LE(I, AllocatedCount);
+
+  proc_maps->data = (char*)VmMap;
+  proc_maps->mmaped_size = AllocatedSize;
+  proc_maps->len = Size;
+
+  CloseKmemFile(Fd);
+#else
+  const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() };
+  size_t Size = 0;
+  int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0);
+  CHECK_EQ(Err, 0);
+  CHECK_GT(Size, 0);
+
+  size_t MmapedSize = Size * 4 / 3;
+  void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()");
+  Size = MmapedSize;
+  Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0);
+  CHECK_EQ(Err, 0);
+
+  proc_maps->data = (char*)VmMap;
+  proc_maps->mmaped_size = MmapedSize;
+  proc_maps->len = Size;
+#endif
+}
+
+bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
+                               char filename[], uptr filename_size,
+                               uptr *protection) {
+  char *last = proc_self_maps_.data + proc_self_maps_.len;
+  if (current_ >= last) return false;
+  uptr dummy;
+  if (!start) start = &dummy;
+  if (!end) end = &dummy;
+  if (!offset) offset = &dummy;
+  if (!protection) protection = &dummy;
+#if SANITIZER_USE_FREEBSD_MMAP_WORKAROUND
+  const struct vm_map_entry *VmEntry = (struct vm_map_entry*)current_;
+
+  *start = VmEntry->start;
+  *end = VmEntry->end;
+  *offset = VmEntry->offset;
+
+  *protection = 0;
+  if ((VmEntry->protection & VM_PROT_READ) != 0)
+    *protection |= kProtectionRead;
+  if ((VmEntry->protection & VM_PROT_WRITE) != 0)
+    *protection |= kProtectionWrite;
+  if ((VmEntry->protection & VM_PROT_EXECUTE) != 0)
+    *protection |= kProtectionExecute;
+
+  if (filename != NULL && filename_size > 0)
+    ReadVmEntryFilePath(filename, filename_size, VmEntry);
+
+  current_ += sizeof(struct vm_map_entry);
+#else // !SANITIZER_USE_FREEBSD_MMAP_WORKAROUND
+  struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
+
+  *start = (uptr)VmEntry->kve_start;
+  *end = (uptr)VmEntry->kve_end;
+  *offset = (uptr)VmEntry->kve_offset;
+
+  *protection = 0;
+  if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
+    *protection |= kProtectionRead;
+  if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
+    *protection |= kProtectionWrite;
+  if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
+    *protection |= kProtectionExecute;
+
+  if (filename != NULL && filename_size > 0) {
+    internal_snprintf(filename,
+                      Min(filename_size, (uptr)PATH_MAX),
+                      "%s", VmEntry->kve_path);
+  }
+
+  current_ += VmEntry->kve_structsize;
+#endif // !SANITIZER_USE_FREEBSD_MMAP_WORKAROUND
+  return true;
+}
+
+} // namespace __sanitizer
+
+#endif // SANITIZER_FREEBSD
Index: lib/sanitizer_common/sanitizer_procmaps_linux.cc
===================================================================
--- lib/sanitizer_common/sanitizer_procmaps_linux.cc
+++ lib/sanitizer_common/sanitizer_procmaps_linux.cc
@@ -11,99 +11,21 @@
//===----------------------------------------------------------------------===// #include "sanitizer_platform.h" -#if SANITIZER_FREEBSD || SANITIZER_LINUX +#if SANITIZER_LINUX #include "sanitizer_common.h" -#include "sanitizer_placement_new.h" #include "sanitizer_procmaps.h" -#if SANITIZER_FREEBSD -#include -#include -#include -#endif - namespace __sanitizer { // Linker initialized. ProcSelfMapsBuff MemoryMappingLayout::cached_proc_self_maps_; StaticSpinMutex MemoryMappingLayout::cache_lock_; // Linker initialized. -static void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { -#if SANITIZER_FREEBSD - const int Mib[4] = { CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid() }; - size_t Size = 0; - int Err = sysctl(Mib, 4, NULL, &Size, NULL, 0); - CHECK_EQ(Err, 0); - CHECK_GT(Size, 0); - - size_t MmapedSize = Size * 4 / 3; - void *VmMap = MmapOrDie(MmapedSize, "ReadProcMaps()"); - Size = MmapedSize; - Err = sysctl(Mib, 4, VmMap, &Size, NULL, 0); - CHECK_EQ(Err, 0); - - proc_maps->data = (char*)VmMap; - proc_maps->mmaped_size = MmapedSize; - proc_maps->len = Size; -#else +void ReadProcMaps(ProcSelfMapsBuff *proc_maps) { proc_maps->len = ReadFileToBuffer("/proc/self/maps", &proc_maps->data, &proc_maps->mmaped_size, 1 << 26); -#endif -} - -MemoryMappingLayout::MemoryMappingLayout(bool cache_enabled) { - ReadProcMaps(&proc_self_maps_); - if (cache_enabled) { - if (proc_self_maps_.mmaped_size == 0) { - LoadFromCache(); - CHECK_GT(proc_self_maps_.len, 0); - } - } else { - CHECK_GT(proc_self_maps_.mmaped_size, 0); - } - Reset(); - // FIXME: in the future we may want to cache the mappings on demand only. - if (cache_enabled) - CacheMemoryMappings(); -} - -MemoryMappingLayout::~MemoryMappingLayout() { - // Only unmap the buffer if it is different from the cached one. Otherwise - // it will be unmapped when the cache is refreshed. 
-  if (proc_self_maps_.data != cached_proc_self_maps_.data) {
-    UnmapOrDie(proc_self_maps_.data, proc_self_maps_.mmaped_size);
-  }
-}
-
-void MemoryMappingLayout::Reset() {
-  current_ = proc_self_maps_.data;
 }
 
-// static
-void MemoryMappingLayout::CacheMemoryMappings() {
-  SpinMutexLock l(&cache_lock_);
-  // Don't invalidate the cache if the mappings are unavailable.
-  ProcSelfMapsBuff old_proc_self_maps;
-  old_proc_self_maps = cached_proc_self_maps_;
-  ReadProcMaps(&cached_proc_self_maps_);
-  if (cached_proc_self_maps_.mmaped_size == 0) {
-    cached_proc_self_maps_ = old_proc_self_maps;
-  } else {
-    if (old_proc_self_maps.mmaped_size) {
-      UnmapOrDie(old_proc_self_maps.data,
-                 old_proc_self_maps.mmaped_size);
-    }
-  }
-}
-
-void MemoryMappingLayout::LoadFromCache() {
-  SpinMutexLock l(&cache_lock_);
-  if (cached_proc_self_maps_.data) {
-    proc_self_maps_ = cached_proc_self_maps_;
-  }
-}
-
-#if !SANITIZER_FREEBSD
 // Parse a hex value in str and update str.
 static uptr ParseHex(char **str) {
   uptr x = 0;
@@ -128,34 +50,6 @@
 static bool IsOneOf(char c, char c1, char c2) {
   return c == c1 || c == c2;
 }
-#endif
-
-static bool IsDecimal(char c) {
-  return c >= '0' && c <= '9';
-}
-
-static bool IsHex(char c) {
-  return (c >= '0' && c <= '9')
-      || (c >= 'a' && c <= 'f');
-}
-
-static uptr ReadHex(const char *p) {
-  uptr v = 0;
-  for (; IsHex(p[0]); p++) {
-    if (p[0] >= '0' && p[0] <= '9')
-      v = v * 16 + p[0] - '0';
-    else
-      v = v * 16 + p[0] - 'a' + 10;
-  }
-  return v;
-}
-
-static uptr ReadDecimal(const char *p) {
-  uptr v = 0;
-  for (; IsDecimal(p[0]); p++)
-    v = v * 10 + p[0] - '0';
-  return v;
-}
 
 bool MemoryMappingLayout::Next(uptr *start, uptr *end, uptr *offset,
                                char filename[], uptr filename_size,
@@ -167,29 +61,6 @@
   if (!end) end = &dummy;
   if (!offset) offset = &dummy;
   if (!protection) protection = &dummy;
-#if SANITIZER_FREEBSD
-  struct kinfo_vmentry *VmEntry = (struct kinfo_vmentry*)current_;
-
-  *start = (uptr)VmEntry->kve_start;
-  *end = (uptr)VmEntry->kve_end;
-  *offset = (uptr)VmEntry->kve_offset;
-
-  *protection = 0;
-  if ((VmEntry->kve_protection & KVME_PROT_READ) != 0)
-    *protection |= kProtectionRead;
-  if ((VmEntry->kve_protection & KVME_PROT_WRITE) != 0)
-    *protection |= kProtectionWrite;
-  if ((VmEntry->kve_protection & KVME_PROT_EXEC) != 0)
-    *protection |= kProtectionExecute;
-
-  if (filename != NULL && filename_size > 0) {
-    internal_snprintf(filename,
-                      Min(filename_size, (uptr)PATH_MAX),
-                      "%s", VmEntry->kve_path);
-  }
-
-  current_ += VmEntry->kve_structsize;
-#else // !SANITIZER_FREEBSD
   char *next_line = (char*)internal_memchr(current_, '\n', last - current_);
   if (next_line == 0)
     next_line = last;
@@ -236,69 +107,9 @@
   if (filename && i < filename_size)
     filename[i] = 0;
   current_ = next_line + 1;
-#endif // !SANITIZER_FREEBSD
   return true;
 }
 
-uptr MemoryMappingLayout::DumpListOfModules(LoadedModule *modules,
-                                            uptr max_modules,
-                                            string_predicate_t filter) {
-  Reset();
-  uptr cur_beg, cur_end, cur_offset;
-  InternalScopedBuffer<char> module_name(kMaxPathLength);
-  uptr n_modules = 0;
-  for (uptr i = 0; n_modules < max_modules &&
-                       Next(&cur_beg, &cur_end, &cur_offset, module_name.data(),
-                            module_name.size(), 0);
-       i++) {
-    const char *cur_name = module_name.data();
-    if (cur_name[0] == '\0')
-      continue;
-    if (filter && !filter(cur_name))
-      continue;
-    void *mem = &modules[n_modules];
-    // Don't subtract 'cur_beg' from the first entry:
-    // * If a binary is compiled w/o -pie, then the first entry in
-    //   process maps is likely the binary itself (all dynamic libs
-    //   are mapped higher in address space). For such a binary,
-    //   instruction offset in binary coincides with the actual
-    //   instruction address in virtual memory (as code section
-    //   is mapped to a fixed memory range).
-    // * If a binary is compiled with -pie, all the modules are
-    //   mapped high at address space (in particular, higher than
-    //   shadow memory of the tool), so the module can't be the
-    //   first entry.
-    uptr base_address = (i ? cur_beg : 0) - cur_offset;
-    LoadedModule *cur_module = new(mem) LoadedModule(cur_name, base_address);
-    cur_module->addAddressRange(cur_beg, cur_end);
-    n_modules++;
-  }
-  return n_modules;
-}
-
-void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size) {
-  char *smaps = 0;
-  uptr smaps_cap = 0;
-  uptr smaps_len = ReadFileToBuffer("/proc/self/smaps",
-                                    &smaps, &smaps_cap, 64<<20);
-  uptr start = 0;
-  bool file = false;
-  const char *pos = smaps;
-  while (pos < smaps + smaps_len) {
-    if (IsHex(pos[0])) {
-      start = ReadHex(pos);
-      for (; *pos != '/' && *pos > '\n'; pos++) {}
-      file = *pos == '/';
-    } else if (internal_strncmp(pos, "Rss:", 4) == 0) {
-      for (; *pos < '0' || *pos > '9'; pos++) {}
-      uptr rss = ReadDecimal(pos) * 1024;
-      cb(start, rss, file, stats, stats_size);
-    }
-    while (*pos++ != '\n') {}
-  }
-  UnmapOrDie(smaps, smaps_cap);
-}
-
 } // namespace __sanitizer
 
-#endif // SANITIZER_FREEBSD || SANITIZER_LINUX
+#endif // SANITIZER_LINUX