Index: lib/interception/interception_win.h
===================================================================
--- lib/interception/interception_win.h
+++ lib/interception/interception_win.h
@@ -42,6 +42,23 @@
                               const char *function_name, uptr new_function,
                               uptr *orig_old_func);
 
+#if !SANITIZER_WINDOWS64
+// Exposed for unittests
+bool OverrideFunctionWithDetour(
+    uptr old_func, uptr new_func, uptr *orig_old_func);
+#endif
+
+// Exposed for unittests
+bool OverrideFunctionWithRedirectJump(
+    uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithHotPatch(
+    uptr old_func, uptr new_func, uptr *orig_old_func);
+bool OverrideFunctionWithTrampoline(
+    uptr old_func, uptr new_func, uptr *orig_old_func);
+
+// Exposed for unittests
+void TestOnlyReleaseTrampolineRegions();
+
 }  // namespace __interception
 
 #if defined(INTERCEPTION_DYNAMIC_CRT)
Index: lib/interception/interception_win.cc
===================================================================
--- lib/interception/interception_win.cc
+++ lib/interception/interception_win.cc
@@ -15,6 +15,8 @@
 #ifdef _WIN32
 
 #include "interception.h"
+#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_platform.h"
 #define WIN32_LEAN_AND_MEAN
 #include <windows.h>
 
@@ -35,268 +37,510 @@
     dst_c[i] = src_c[i];
 }
 
-#if SANITIZER_WINDOWS64
-static void WriteIndirectJumpInstruction(char *jmp_from, uptr *indirect_target) { // NOLINT
-  // jmp [rip + XXYYZZWW] = FF 25 WW ZZ YY XX, where
-  // XXYYZZWW is an offset from jmp_from.
-  // The displacement is still 32-bit in x64, so indirect_target must be located
-  // within +/- 2GB range.
-  int offset = (int)(indirect_target - (uptr *)jmp_from);
-  jmp_from[0] = '\xFF';
-  jmp_from[1] = '\x25';
-  *(int*)(jmp_from + 2) = offset;
+static bool ChangeMemoryProtection(
+    uptr address, uptr size, DWORD* old_protection) {
+  return ::VirtualProtect((void *)address, size,
+                          PAGE_EXECUTE_READWRITE,
+                          old_protection) != FALSE;
+}
+
+static bool RestoreMemoryProtection(
+    uptr address, uptr size, DWORD old_protection) {
+  DWORD unused;
+  return ::VirtualProtect((void *)address, size,
+                          old_protection,
+                          &unused) != FALSE;
+}
+
+static bool IsMemoryPadding(uptr address, uptr size) {
+  u8* function = (u8*)address;
+  for (size_t i = 0; i < size; ++i)
+    if (function[i] != 0x90 && function[i] != 0xCC)
+      return false;
+  return true;
 }
-#else
-static void WriteJumpInstruction(char *jmp_from, char *to) {
-  // jmp XXYYZZWW = E9 WW ZZ YY XX, where XXYYZZWW is an offset from jmp_from
-  // to the next instruction to the destination.
-  ptrdiff_t offset = to - jmp_from - 5;
-  *jmp_from = '\xE9';
-  *(ptrdiff_t*)(jmp_from + 1) = offset;
+
+static void WritePadding(uptr from, uptr size) {
+  _memset((void*)from, 0xCC, (size_t)size);
+}
+
+static void CopyInstructions(uptr to, uptr from, uptr size) {
+  _memcpy((void*)to, (void*)from, (size_t)size);
+}
+
+static void WriteJumpInstruction(uptr from, uptr target) {
+  // jmp XXYYZZWW = E9 WW ZZ YY XX, where XXYYZZWW is a relative offset.
+  // The offset is the distance from the end of the jump instruction to the
+  // targeted instruction.
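+  // For example (illustrative addresses, not taken from this patch): a jump
+  // written at 0x401000 that targets 0x402005 stores the offset
+  // 0x402005 - 0x401000 - 5 = 0x1000, producing the bytes E9 00 10 00 00.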
+  const int kInstructionLength = 5;
+  ptrdiff_t offset = target - from - kInstructionLength;
+  *(u8*)from = 0xE9;
+  *(u32*)(from + 1) = offset;
+}
+
+static void WriteShortJumpInstruction(uptr from, uptr target) {
+  const int kInstructionLength = 2;
+  uptr offset = target - from - kInstructionLength;
+  *(u8*)from = 0xEB;
+  *(u8*)(from + 1) = (u8)offset;
+}
+
+#if SANITIZER_WINDOWS64
+static void WriteIndirectJumpInstruction(uptr from, uptr indirect_target) {
+  // jmp [rip + XXYYZZWW] = FF 25 WW ZZ YY XX, where XXYYZZWW is a relative
+  // offset.
+  // The offset is the distance from the end of the jump instruction to the
+  // memory location containing the targeted address. The displacement is still
+  // 32-bit in x64, so indirect_target must be located within +/- 2GB range.
+  const int kInstructionLength = 6;
+  int offset = indirect_target - from - kInstructionLength;
+  *(u16*)from = 0x25FF;
+  *(u32*)(from + 2) = offset;
 }
 #endif
 
-static void WriteTrampolineJumpInstruction(char *jmp_from, char *to) {
+static void WriteIndirectJumpToTarget(
+    uptr from, uptr indirect_target, uptr target) {
 #if SANITIZER_WINDOWS64
-  // Emit an indirect jump through immediately following bytes:
-  // jmp_from:
-  //   jmp [rip + 6]
-  //   .quad to
-  // Store the address.
-  uptr *indirect_target = (uptr *)(jmp_from + 6);
-  *indirect_target = (uptr)to;
   // Write the indirect jump.
-  WriteIndirectJumpInstruction(jmp_from, indirect_target);
+  WriteIndirectJumpInstruction(from, indirect_target);
+  // Store the address.
+  *(u64*)indirect_target = target;
 #else
-  WriteJumpInstruction(jmp_from, to);
+  (void)indirect_target;
+  WriteJumpInstruction(from, target);
 #endif
 }
 
-static void WriteInterceptorJumpInstruction(char *jmp_from, char *to) {
+static void WriteDirectJumpInstruction(uptr from, uptr target) {
 #if SANITIZER_WINDOWS64
   // Emit an indirect jump through immediately following bytes:
-  // jmp_from:
-  //   jmp [rip - 8]
-  //   .quad to
-  // Store the address.
-  uptr *indirect_target = (uptr *)(jmp_from - 8);
-  *indirect_target = (uptr)to;
-  // Write the indirect jump.
-  WriteIndirectJumpInstruction(jmp_from, indirect_target);
+  //   jmp [rip + 6]
+  //   .quad <address>
+  const int kInstructionLength = 6;
+  WriteIndirectJumpToTarget(from, from + kInstructionLength, target);
#else
-  WriteJumpInstruction(jmp_from, to);
+  WriteJumpInstruction(from, target);
 #endif
 }
 
-static char *GetMemoryForTrampoline(size_t size) {
-  // Trampolines are allocated from a common pool.
-  const int POOL_SIZE = 1024;
-  static char *pool = NULL;
-  static size_t pool_used = 0;
-  if (!pool) {
-    pool = (char *)VirtualAlloc(NULL, POOL_SIZE, MEM_RESERVE | MEM_COMMIT,
-                                PAGE_EXECUTE_READWRITE);
-    // FIXME: Might want to apply PAGE_EXECUTE_READ access after all the
-    // interceptors are in place.
-    if (!pool)
-      return NULL;
-    _memset(pool, 0xCC /* int 3 */, POOL_SIZE);
+#if SANITIZER_WINDOWS64
+struct TrampolineMemoryRegion {
+  uptr content;
+  uptr allocated_size;
+  uptr max_size;
+};
+
+static const uptr kTrampolineScanLimitRange = (1000ULL) << 20;  // 1 gig
+static const int kMaxTrampolineRegion = 1024;
+static TrampolineMemoryRegion TrampolineRegions[kMaxTrampolineRegion];
+
+static void *AllocateTrampolineRegion(uptr image_address, size_t granularity) {
+  uptr address = image_address;
+  uptr scanned = 0;
+  while (scanned < kTrampolineScanLimitRange) {
+    MEMORY_BASIC_INFORMATION info;
+    if (!::VirtualQuery((void*)address, &info, sizeof(info)))
+      return nullptr;
+
+    // Check whether a region can be allocated at |address|.
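+    // Only a MEM_FREE block of at least |granularity| bytes is a candidate.
+    // The base passed to VirtualAlloc is rounded up to the allocation
+    // granularity (typically 64K on Windows), since reserved regions must
+    // start on a granularity boundary.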
+    if (info.State == MEM_FREE && info.RegionSize >= granularity) {
+      void *page = ::VirtualAlloc((void*)RoundUpTo(address, granularity),
+                                  granularity,
+                                  MEM_RESERVE | MEM_COMMIT,
+                                  PAGE_EXECUTE_READWRITE);
+      return page;
+    }
+
+    // Move to the next region.
+    address = (uptr)info.BaseAddress + info.RegionSize;
+    scanned += info.RegionSize;
+  }
+  return nullptr;
+}
+
+// Used by unittests to release mapped memory space.
+void TestOnlyReleaseTrampolineRegions() {
+  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+    TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
+    ::VirtualFree((void*)current->content, 0, MEM_RELEASE);
+    current->content = 0;
   }
 }
 
-  if (pool_used + size > POOL_SIZE)
-    return NULL;
+static void *AllocateMemoryForTrampoline(uptr image_address, size_t size) {
+  // Find a region within 2G with enough space to allocate |size| bytes.
+  TrampolineMemoryRegion* region = nullptr;
+  for (size_t bucket = 0; bucket < kMaxTrampolineRegion; ++bucket) {
+    TrampolineMemoryRegion* current = &TrampolineRegions[bucket];
+    if (current->content == 0) {
+      // No valid region found, allocate a new region.
+      size_t bucket_size = GetMmapGranularity();
+      void* content = AllocateTrampolineRegion(image_address, bucket_size);
+      if (content == nullptr)
+        return nullptr;
+
+      current->content = (uptr)content;
+      current->allocated_size = 0;
+      current->max_size = bucket_size;
+      region = current;
+      break;
+    } else if (current->content > image_address &&
+               current->content - image_address < 0x7FFF0000 &&
+               current->max_size - current->allocated_size > size) {
+      // The space can be allocated in the current region.
+      region = current;
+      break;
+    }
+  }
+
+  // Failed to find a region.
+  if (region == nullptr)
+    return nullptr;
+
+  // Allocate the space in the current region.
+  uptr allocated_space = region->content + region->allocated_size;
+  region->allocated_size += size;
+  return (void *)allocated_space;
+}
+
+#else
-  char *ret = pool + pool_used;
-  pool_used += size;
+// Trampolines are allocated in the same region.
+static const int kRegionSize = 4096;
+static char *trampoline_memory = NULL;
+static size_t trampoline_memory_used = 0;
+
+static char *AllocateMemoryForTrampoline(uptr image_address, size_t size) {
+  (void)image_address;
+  if (!trampoline_memory) {
+    trampoline_memory = (char *)VirtualAlloc(nullptr, kRegionSize,
+                                             MEM_RESERVE | MEM_COMMIT,
+                                             PAGE_EXECUTE_READWRITE);
+    if (!trampoline_memory)
+      return nullptr;
+    WritePadding((uptr)trampoline_memory, kRegionSize);
+  }
+
+  if (trampoline_memory_used + size > kRegionSize)
+    return nullptr;
+
+  char *ret = trampoline_memory + trampoline_memory_used;
+  trampoline_memory_used += size;
   return ret;
 }
 
+// Used by unittests to release mapped memory space.
+void TestOnlyReleaseTrampolineRegions() {
+  trampoline_memory_used = 0;
+}
+
+#endif  // SANITIZER_WINDOWS64
+
 // Returns 0 on error.
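+// GetInstructionSize is a minimal x86/x64 length decoder: it only recognizes
+// the opcode patterns commonly found at the entry of intercepted functions,
+// and reports 0 for anything else so that callers can refuse to patch code
+// they cannot safely relocate.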
-static size_t RoundUpToInstrBoundary(size_t size, char *code) {
+static size_t GetInstructionSize(uptr address) {
+  switch (*(u8*)address) {
+    case 0x90:  // 90 : nop
+      return 1;
+
+    case 0x50:  // push eax / rax
+    case 0x51:  // push ecx / rcx
+    case 0x52:  // push edx / rdx
+    case 0x53:  // push ebx / rbx
+    case 0x54:  // push esp / rsp
+    case 0x55:  // push ebp / rbp
+    case 0x56:  // push esi / rsi
+    case 0x57:  // push edi / rdi
+    case 0x5D:  // pop ebp / rbp
+      return 1;
+
+    case 0x6A:  // 6A XX = push XX
+      return 2;
+
+    case 0xb8:  // b8 XX XX XX XX : mov eax, XX XX XX XX
+    case 0xB9:  // b9 XX XX XX XX : mov ecx, XX XX XX XX
+      return 5;
+
+    case 0xE9:  // E9 XX XX XX XX : jmp