Index: compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cc
@@ -146,9 +146,9 @@
     // indices, but we'll never move the mapping address so we don't have
     // any multi-thread synchronization issues with that.
     uintptr_t mapping;
-    status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo_, 0, MappingSize,
-                              ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE,
-                              &mapping);
+    status =
+        _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
+                     0, vmo_, 0, MappingSize, &mapping);
     CHECK_EQ(status, ZX_OK);
 
     // Hereafter other threads are free to start storing into
Index: compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
===================================================================
--- compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
+++ compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cc
@@ -172,8 +172,8 @@
   // TODO(mcgrathr): Maybe allocate a VMAR for all sanitizer heap and use that?
   uintptr_t addr;
   status =
-      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, size,
-                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+                   vmo, 0, size, &addr);
   _zx_handle_close(vmo);
 
   if (status != ZX_OK) {
@@ -236,10 +236,9 @@
   DCHECK_GE(base + size_, map_size + offset);
 
   uintptr_t addr;
-  status = _zx_vmar_map_old(
-      vmar, offset, vmo, 0, map_size,
-      ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE | ZX_VM_FLAG_SPECIFIC,
-      &addr);
+  status =
+      _zx_vmar_map(vmar, ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC,
+                   offset, vmo, 0, map_size, &addr);
   _zx_handle_close(vmo);
   if (status != ZX_OK) {
     if (status != ZX_ERR_NO_MEMORY || die_for_nomem) {
@@ -318,8 +317,8 @@
   size_t map_size = size + alignment;
   uintptr_t addr;
   status =
-      _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                       ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE, &addr);
+      _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ | ZX_VM_PERM_WRITE, 0,
+                   vmo, 0, map_size, &addr);
   if (status == ZX_OK) {
     uintptr_t map_addr = addr;
     uintptr_t map_end = map_addr + map_size;
@@ -331,11 +330,10 @@
                                   sizeof(info), NULL, NULL);
       if (status == ZX_OK) {
         uintptr_t new_addr;
-        status = _zx_vmar_map_old(_zx_vmar_root_self(), addr - info.base, vmo,
-                                  0, size,
-                                  ZX_VM_FLAG_PERM_READ | ZX_VM_FLAG_PERM_WRITE |
-                                      ZX_VM_FLAG_SPECIFIC_OVERWRITE,
-                                  &new_addr);
+        status = _zx_vmar_map(
+            _zx_vmar_root_self(),
+            ZX_VM_PERM_READ | ZX_VM_PERM_WRITE | ZX_VM_SPECIFIC_OVERWRITE,
+            addr - info.base, vmo, 0, size, &new_addr);
         if (status == ZX_OK) CHECK_EQ(new_addr, addr);
       }
     }
@@ -395,8 +393,8 @@
   if (vmo_size < max_len) max_len = vmo_size;
   size_t map_size = RoundUpTo(max_len, PAGE_SIZE);
   uintptr_t addr;
-  status = _zx_vmar_map_old(_zx_vmar_root_self(), 0, vmo, 0, map_size,
-                            ZX_VM_FLAG_PERM_READ, &addr);
+  status = _zx_vmar_map(_zx_vmar_root_self(), ZX_VM_PERM_READ, 0, vmo, 0,
+                        map_size, &addr);
   if (status == ZX_OK) {
     *buff = reinterpret_cast<char *>(addr);
     *buff_size = map_size;
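
Every hunk above applies the same mechanical rewrite: the deprecated call
_zx_vmar_map_old(vmar, vmar_offset, vmo, vmo_offset, len, flags, &addr)
becomes _zx_vmar_map(vmar, options, vmar_offset, vmo, vmo_offset, len, &addr).
The option bits move from the trailing flags argument up to the second
parameter, and the ZX_VM_FLAG_* constants are renamed to ZX_VM_*. Below is a
minimal sketch of the new calling convention, assuming the Zircon syscall
headers are available; MapVmoReadWrite is a hypothetical helper used only for
illustration, not part of this patch.

#include <zircon/syscalls.h>

// Hypothetical helper: map a VMO read/write into the root VMAR using the
// new _zx_vmar_map argument order (options second, VMAR offset third).
zx_status_t MapVmoReadWrite(zx_handle_t vmo, size_t size,
                            uintptr_t *out_addr) {
  return _zx_vmar_map(_zx_vmar_root_self(),
                      ZX_VM_PERM_READ | ZX_VM_PERM_WRITE,
                      /*vmar_offset=*/0, vmo, /*vmo_offset=*/0, size,
                      out_addr);
}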