Index: lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
===================================================================
--- lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
+++ lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
@@ -146,9 +146,9 @@
       // indices, but we'll never move the mapping address so we don't have
       // any multi-thread synchronization issues with that.
       uintptr_t mapping;
-      status =
-          _zx_vmar_map(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize,
-                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &mapping);
+      status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize,
+                                ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
+                                &mapping);
       CHECK_EQ(status, ZX_OK);
 
       // Hereafter other threads are free to start storing into
Index: lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- lib/sanitizer_common/sanitizer_fuchsia.cc
+++ lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -171,8 +171,9 @@
 
   // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, size,
-                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+  status =
+      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size,
+                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
   _zx_handle_close(vmo);
 
   if (status != ZX_OK) {
@@ -206,10 +207,10 @@
   uintptr_t base;
   zx_handle_t vmar;
   zx_status_t status =
-      _zx_vmar_allocate(_zx_vmar_root_self(), 0, init_size,
-                        ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
-                            ZX_VM_FLAG_CAN_MAP_SPECIFIC,
-                        &vmar, &base);
+      _zx_vmar_allocate_old(_zx_vmar_root_self(), 0, init_size,
+                            ZX_VM_FLAG_CAN_MAP_READ | ZX_VM_FLAG_CAN_MAP_WRITE |
+                                ZX_VM_FLAG_CAN_MAP_SPECIFIC,
+                            &vmar, &base);
   if (status != ZX_OK)
     ReportMmapFailureAndDie(init_size, name, "zx_vmar_allocate", status);
   base_ = reinterpret_cast<void *>(base);
@@ -235,7 +236,7 @@
   DCHECK_GE(base + size_, map_size + offset);
 
   uintptr_t addr;
-  status = _zx_vmar_map(
+  status = _zx_vmar_map_old(
       vmar, offset, vmo, 0, map_size,
       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
       &addr);
@@ -316,8 +317,9 @@
   // beginning of the VMO, and unmap the excess before and after.
   size_t map_size = size + alignment;
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                        ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+  status =
+      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
   if (status == ZX_OK) {
     uintptr_t map_addr = addr;
     uintptr_t map_end = map_addr + map_size;
@@ -329,11 +331,11 @@
                                    sizeof(info), NULL, NULL);
       if (status == ZX_OK) {
         uintptr_t new_addr;
-        status =
-            _zx_vmar_map(_zx_vmar_root_self(), addr - info.base, vmo, 0, size,
-                         ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
-                             ZX_VM_FLAG_SPECIFIC_OVERWRITE,
-                         &new_addr);
+        status = _zx_vmar_map_old(_zx_vmar_root_self(), addr - info.base, vmo,
+                                  0, size,
+                                  ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
+                                      ZX_VM_FLAG_SPECIFIC_OVERWRITE,
+                                  &new_addr);
         if (status == ZX_OK) CHECK_EQ(new_addr, addr);
       }
     }
@@ -393,8 +395,8 @@
   if (vmo_size < max_len) max_len = vmo_size;
   size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
   uintptr_t addr;
-  status = _zx_vmar_map(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                        ZX_VM_FLAG_PERM_READ, &addr);
+  status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
+                            ZX_VM_FLAG_PERM_READ, &addr);
   if (status == ZX_OK) {
     *buff = reinterpret_cast<char *>(addr);
     *buff_size = map_size;
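
Note: the change is purely mechanical. Zircon renamed these legacy VMAR entry
points with an _old suffix (presumably to free the original names for new
signatures), so every call site keeps the exact same argument list and only
the symbol name changes. Below is a minimal sketch of the resulting call
pattern, assuming the transitional _zx_vmar_map_old declaration available in
<zircon/syscalls.h> at the time of this change; MapVmoReadWrite is a
hypothetical helper for illustration, not part of the patch.

  #include <zircon/process.h>
  #include <zircon/syscalls.h>

  // Map a whole VMO read/write into the root VMAR, letting the kernel pick
  // the address. The arguments are identical to the pre-rename _zx_vmar_map:
  // target VMAR, VMAR offset, VMO, VMO offset, length, flags, out address.
  static zx_status_t MapVmoReadWrite(zx_handle_t vmo, size_t size,
                                     uintptr_t *out_addr) {
    return _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size,
                            ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
                            out_addr);
  }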