Index: lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- lib/sanitizer_common/sanitizer_fuchsia.cc
+++ lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -232,10 +232,15 @@
   return DoAnonymousMmapOrDie(size, mem_type, false, false);
 }
 
-// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator.
-// Instead of doing exactly what they say, we make MmapNoAccess actually
-// just allocate a VMAR to reserve the address space. Then MmapFixedOrDie
-// uses that VMAR instead of the root.
+// MmapNoAccess and MmapFixedOrDie are used only by sanitizer_allocator
+// and scudo_allocator. Instead of doing exactly what they say, we make
+// MmapNoAccess actually just allocate a VMAR to reserve the address space.
+// Then MmapFixedOrDie uses that VMAR instead of the root.
+// TODO: Refactor this API to give callers precisely what they need.
+// In this case, scudo_allocator is using MmapNoAccess+MmapFixed to get a
+// single mapping with guard ranges around it, whereas the sanitizer
+// allocator is doing a large allocation and then doling out portions of
+// that range. These needs can probably be better managed.
 
 mx_handle_t allocator_vmar = MX_HANDLE_INVALID;
 uintptr_t allocator_vmar_base;
@@ -243,7 +248,12 @@
 
 void *MmapNoAccess(uptr size) {
   size = RoundUpTo(size, PAGE_SIZE);
-  CHECK_EQ(allocator_vmar, MX_HANDLE_INVALID);
+  if (allocator_vmar != MX_HANDLE_INVALID) {
+    _mx_handle_close(allocator_vmar);
+    allocator_vmar = MX_HANDLE_INVALID;
+    allocator_vmar_base = 0;
+    allocator_vmar_size = 0;
+  }
   uintptr_t base;
   mx_status_t status = _mx_vmar_allocate(_mx_vmar_root_self(), 0, size,