Index: lib/scudo/standalone/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/CMakeLists.txt
+++ lib/scudo/standalone/CMakeLists.txt
@@ -68,6 +68,7 @@
   mutex.h
   platform.h
   report.h
+  secondary.h
   stats.h
   string_utils.h
   vector.h)
Index: lib/scudo/standalone/common.h
===================================================================
--- lib/scudo/standalone/common.h
+++ lib/scudo/standalone/common.h
@@ -135,6 +135,10 @@
 
 // Platform memory mapping functions.
 
+typedef struct OpaquePlatformData {
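+  // Sized to fit the largest platform specific payload, currently Fuchsia's
+  // MapInfo (see the COMPILER_CHECK in fuchsia.cc).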
+  uptr _[3];
+} OpaquePlatformData;
+
 #define MAP_ALLOWNOMEM (1U << 0)
 #define MAP_NOACCESS (1U << 1)
 #define MAP_RESIZABLE (1U << 2)
@@ -148,16 +152,17 @@
 // platform specific data to the function.
 // Returns nullptr on error or dies if MAP_ALLOWNOMEM is not specified.
 void *map(void *Addr, uptr Size, const char *Name, uptr Flags = 0,
-          u64 *Extra = nullptr);
+          OpaquePlatformData *Extra = nullptr);
 
 // Indicates that we are getting rid of the whole mapping, which might have
 // further consequences on Extra, depending on the platform.
 #define UNMAP_ALL (1U << 0)
 
-void unmap(void *Addr, uptr Size, uptr Flags = 0, u64 *Extra = nullptr);
+void unmap(void *Addr, uptr Size, uptr Flags = 0,
+           OpaquePlatformData *Extra = nullptr);
 
 void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      u64 *Extra = nullptr);
+                      OpaquePlatformData *Extra = nullptr);
 
 // Internal map & unmap fatal error. This must not call map().
 void NORETURN dieOnMapUnmapError(bool OutOfMemory = false);
Index: lib/scudo/standalone/fuchsia.cc
===================================================================
--- lib/scudo/standalone/fuchsia.cc
+++ lib/scudo/standalone/fuchsia.cc
@@ -38,44 +38,30 @@
 struct MapInfo {
   zx_handle_t Vmar;
   zx_handle_t Vmo;
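+  // Caching the Vmar base and the Vmo size spares us extra syscalls
+  // (_zx_object_get_info, _zx_vmo_get_size) in map().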
+  uintptr_t VmarBase;
+  uint64_t VmoSize;
 };
-COMPILER_CHECK(sizeof(MapInfo) == sizeof(u64));
+COMPILER_CHECK(sizeof(MapInfo) <= sizeof(OpaquePlatformData));
 
 static void *allocateVmar(uptr Size, MapInfo *Info, bool AllowNoMem) {
   // Only scenario so far.
   DCHECK(Info);
   DCHECK_EQ(Info->Vmar, ZX_HANDLE_INVALID);
 
-  uintptr_t P;
   const zx_status_t Status = _zx_vmar_allocate(
       _zx_vmar_root_self(),
       ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
-      Size, &Info->Vmar, &P);
+      Size, &Info->Vmar, &Info->VmarBase);
   if (Status != ZX_OK) {
     if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
   }
-  return reinterpret_cast<void *>(P);
-}
-
-// Returns the offset of an address range in a Vmar, while checking that said
-// range fully belongs to the Vmar. An alternative would be to keep track of
-// both the base & length to avoid calling this. The tradeoff being a system
-// call vs two extra uptr of storage.
-// TODO(kostyak): revisit the implications of both options.
-static uint64_t getOffsetInVmar(zx_handle_t Vmar, void *Addr, uintptr_t Size) {
-  zx_info_vmar_t Info;
-  const zx_status_t Status = _zx_object_get_info(
-      Vmar, ZX_INFO_VMAR, &Info, sizeof(Info), nullptr, nullptr);
-  CHECK_EQ(Status, ZX_OK);
-  const uint64_t Offset = reinterpret_cast<uintptr_t>(Addr) - Info.base;
-  CHECK_LE(Offset, Info.base + Info.len);
-  CHECK_LE(Offset + Size, Info.base + Info.len);
-  return Offset;
+  return reinterpret_cast<void *>(Info->VmarBase);
 }
 
-void *map(void *Addr, uptr Size, const char *Name, uptr Flags, u64 *Extra) {
+void *map(void *Addr, uptr Size, const char *Name, uptr Flags,
+          OpaquePlatformData *Extra) {
   DCHECK_EQ(Size % PAGE_SIZE, 0);
   const bool AllowNoMem = !!(Flags & MAP_ALLOWNOMEM);
   MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
@@ -95,9 +81,8 @@
     CHECK(Addr);
     DCHECK(Flags & MAP_RESIZABLE);
     Vmo = Info->Vmo;
-    Status = _zx_vmo_get_size(Vmo, &VmoSize);
-    if (Status == ZX_OK)
-      Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
+    VmoSize = Info->VmoSize;
+    Status = _zx_vmo_set_size(Vmo, VmoSize + Size);
     if (Status != ZX_OK) {
       if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
         dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
@@ -116,7 +101,8 @@
 
   uintptr_t P;
   zx_vm_option_t MapFlags = ZX_VM_PERM_READ | ZX_VM_PERM_WRITE;
-  const uint64_t Offset = Addr ? getOffsetInVmar(Vmar, Addr, Size) : 0;
+  const uint64_t Offset =
+      Addr ? reinterpret_cast<uintptr_t>(Addr) - Info->VmarBase : 0;
   if (Offset)
     MapFlags |= ZX_VM_SPECIFIC;
   Status = _zx_vmar_map(Vmar, MapFlags, Offset, Vmo, VmoSize, Size, &P);
@@ -133,11 +119,13 @@
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
   }
+  if (Info)
+    Info->VmoSize += Size;
 
   return reinterpret_cast<void *>(P);
 }
 
-void unmap(void *Addr, uptr Size, uptr Flags, u64 *Extra) {
+void unmap(void *Addr, uptr Size, uptr Flags, OpaquePlatformData *Extra) {
   MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
   if (Flags & UNMAP_ALL) {
     DCHECK_NE(Info, nullptr);
@@ -156,13 +144,12 @@
   if (Info) {
     if (Info->Vmo != ZX_HANDLE_INVALID)
       CHECK_EQ(_zx_handle_close(Info->Vmo), ZX_OK);
-    Info->Vmo = ZX_HANDLE_INVALID;
-    Info->Vmar = ZX_HANDLE_INVALID;
+    memset(Info, 0, sizeof(*Info));
   }
 }
 
 void releasePagesToOS(UNUSED uptr BaseAddress, uptr Offset, uptr Size,
-                      u64 *Extra) {
+                      OpaquePlatformData *Extra) {
   MapInfo *Info = reinterpret_cast<MapInfo *>(Extra);
   DCHECK(Info);
   DCHECK_NE(Info->Vmar, ZX_HANDLE_INVALID);
@@ -188,7 +175,7 @@
   CHECK_EQ(Status, ZX_OK);
 }
 
-u64 getMonotonicTime() { return _zx_clock_get(ZX_CLOCK_MONOTONIC); }
+u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }
 
 u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
 
Index: lib/scudo/standalone/linux.cc
===================================================================
--- lib/scudo/standalone/linux.cc
+++ lib/scudo/standalone/linux.cc
@@ -44,7 +44,7 @@
 void NORETURN die() { abort(); }
 
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
-          UNUSED u64 *Extra) {
+          UNUSED OpaquePlatformData *Extra) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
   if (Flags & MAP_NOACCESS)
     MmapFlags |= MAP_NORESERVE;
@@ -68,13 +68,14 @@
   return P;
 }
 
-void unmap(void *Addr, uptr Size, UNUSED uptr Flags, UNUSED u64 *Extra) {
+void unmap(void *Addr, uptr Size, UNUSED uptr Flags,
+           UNUSED OpaquePlatformData *Extra) {
   if (munmap(Addr, Size) != 0)
     dieOnMapUnmapError();
 }
 
 void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
-                      UNUSED u64 *Extra) {
+                      UNUSED OpaquePlatformData *Extra) {
   void *Addr = reinterpret_cast<void *>(BaseAddress + Offset);
   while (madvise(Addr, Size, MADV_DONTNEED) == -1 && errno == EAGAIN) {
   }
Index: lib/scudo/standalone/secondary.h
===================================================================
--- /dev/null
+++ lib/scudo/standalone/secondary.h
@@ -0,0 +1,208 @@
+//===-- secondary.h ---------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_SECONDARY_H_
+#define SCUDO_SECONDARY_H_
+
+#include "common.h"
+#include "mutex.h"
+#include "stats.h"
+#include "string_utils.h"
+
+namespace scudo {
+
+// This allocator wraps the platform allocation primitives, and as such is on
+// the slower side and should preferably be used for larger-sized allocations.
+// Allocated blocks will be preceded and followed by a guard page, and hold
+// their own header that is not checksummed: the guard pages and the Combined
+// allocator's checksummed header should be enough for our purpose.
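+//
+// A sketch of the mapping produced by allocate() (guard pages are mapped
+// MAP_NOACCESS, and BlockEnd marks the start of the trailing guard page):
+//
+//   MapBase      CommitBase                            BlockEnd      MapEnd
+//   | guard page | header | user allocation .......... | guard page |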
+
+namespace LargeBlock {
+
+struct Header {
+  LargeBlock::Header *Prev;
+  LargeBlock::Header *Next;
+  uptr BlockEnd;
+  uptr MapBase;
+  uptr MapSize;
+  OpaquePlatformData PlatformData;
+};
+
+constexpr uptr getHeaderSize() {
+  return roundUpTo(sizeof(Header), 1U << SCUDO_MIN_ALIGNMENT_LOG);
+}
+
+static Header *getHeader(uptr Ptr) {
+  return reinterpret_cast<Header *>(Ptr - getHeaderSize());
+}
+
+static Header *getHeader(const void *Ptr) {
+  return getHeader(reinterpret_cast<uptr>(Ptr));
+}
+
+} // namespace LargeBlock
+
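+// A minimal usage sketch, mirroring tests/secondary_test.cc (passing nullptr
+// to init() simply skips the global stats registration):
+//
+//   scudo::MapAllocator Allocator;
+//   Allocator.init(/*S=*/nullptr);
+//   void *P = Allocator.allocate(1U << 16);
+//   Allocator.deallocate(P);
+//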
+class MapAllocator {
+public:
+  void initLinkerInitialized(GlobalStats *S) {
+    Stats.initLinkerInitialized();
+    if (S)
+      S->link(&Stats);
+  }
+  void init(GlobalStats *S) {
+    memset(this, 0, sizeof(*this));
+    initLinkerInitialized(S);
+  }
+
+  // As with the primary, the size passed to this function includes any desired
+  // alignment, so that the frontend can align the user allocation. The hint
+  // parameter allows us to unmap spurious memory when dealing with larger
+  // (greater than a page) alignments.
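+  // For example, for a 1 << 16 aligned allocation, the frontend passes
+  // Size = UserSize + (1 << 16) and AlignmentHint = 1 << 16, then rounds the
+  // returned pointer up itself (see SecondaryBasic in secondary_test.cc).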
+  NOINLINE void *allocate(uptr Size, uptr AlignmentHint = 0,
+                          uptr *BlockEnd = nullptr) {
+    DCHECK_GT(Size, AlignmentHint);
+    const uptr PageSize = getPageSizeCached();
+    const uptr MapSize =
+        roundUpTo(Size + LargeBlock::getHeaderSize(), PageSize) + 2 * PageSize;
+    OpaquePlatformData PlatformData = {};
+    uptr MapBase = reinterpret_cast<uptr>(
+        map(nullptr, MapSize, "scudo:secondary", MAP_NOACCESS | MAP_ALLOWNOMEM,
+            &PlatformData));
+    if (UNLIKELY(!MapBase))
+      return nullptr;
+    uptr CommitBase = MapBase + PageSize;
+    uptr MapEnd = MapBase + MapSize;
+
+    // In the unlikely event of alignments larger than a page, adjust the amount
+    // of memory we want to commit, and trim the extra memory.
+    if (UNLIKELY(AlignmentHint >= PageSize)) {
+      // For alignments greater than or equal to a page, the user pointer ends
+      // up on a page boundary, and our headers will live in the preceding page.
+      CommitBase = roundUpTo(MapBase + PageSize + 1, AlignmentHint) - PageSize;
+      const uptr NewMapBase = CommitBase - PageSize;
+      DCHECK_GE(NewMapBase, MapBase);
+      // We only trim the extra memory on 32-bit platforms: 64-bit platforms
+      // are less constrained memory-wise, and that saves us two syscalls.
+      if (SCUDO_WORDSIZE == 32U && NewMapBase != MapBase) {
+        unmap(reinterpret_cast<void *>(MapBase), NewMapBase - MapBase, 0,
+              &PlatformData);
+        MapBase = NewMapBase;
+      }
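+      // The new end must cover the page holding the header, the user portion
+      // proper (Size includes the alignment slack, hence Size - AlignmentHint),
+      // and the trailing guard page.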
+      const uptr NewMapEnd = CommitBase + PageSize +
+                             roundUpTo((Size - AlignmentHint), PageSize) +
+                             PageSize;
+      DCHECK_LE(NewMapEnd, MapEnd);
+      if (SCUDO_WORDSIZE == 32U && NewMapEnd != MapEnd) {
+        unmap(reinterpret_cast<void *>(NewMapEnd), MapEnd - NewMapEnd, 0,
+              &PlatformData);
+        MapEnd = NewMapEnd;
+      }
+    }
+
+    const uptr CommitSize = MapEnd - PageSize - CommitBase;
+    const uptr Ptr = reinterpret_cast<uptr>(
+        map(reinterpret_cast<void *>(CommitBase), CommitSize, "scudo:secondary",
+            0, &PlatformData));
+    LargeBlock::Header *H = reinterpret_cast<LargeBlock::Header *>(Ptr);
+    H->MapBase = MapBase;
+    H->MapSize = MapEnd - MapBase;
+    H->BlockEnd = CommitBase + CommitSize;
+    memcpy(&H->PlatformData, &PlatformData, sizeof(PlatformData));
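+    // Link the new block at the tail of the in-use list, and account for it
+    // in the allocator statistics.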
+    {
+      SpinMutexLock L(&Mutex);
+      if (!Tail) {
+        Tail = H;
+      } else {
+        Tail->Next = H;
+        H->Prev = Tail;
+        Tail = H;
+      }
+      AllocatedBytes += CommitSize;
+      if (LargestSize < CommitSize)
+        LargestSize = CommitSize;
+      NumberOfAllocs++;
+      Stats.add(StatAllocated, CommitSize);
+      Stats.add(StatMapped, CommitSize);
+    }
+    if (BlockEnd)
+      *BlockEnd = CommitBase + CommitSize;
+    return reinterpret_cast<void *>(Ptr + LargeBlock::getHeaderSize());
+  }
+
+  NOINLINE void deallocate(void *Ptr) {
+    LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
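+    // Unlink the block from the in-use list under the lock; the actual unmap
+    // (guard pages included) happens outside of the critical section.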
+    {
+      SpinMutexLock L(&Mutex);
+      LargeBlock::Header *Prev = H->Prev;
+      LargeBlock::Header *Next = H->Next;
+      if (Prev) {
+        CHECK_EQ(Prev->Next, H);
+        Prev->Next = Next;
+      }
+      if (Next) {
+        CHECK_EQ(Next->Prev, H);
+        Next->Prev = Prev;
+      }
+      if (Tail == H) {
+        CHECK(!Next);
+        Tail = Prev;
+      } else {
+        CHECK(Next);
+      }
+      const uptr CommitSize = H->BlockEnd - reinterpret_cast<uptr>(H);
+      FreedBytes += CommitSize;
+      NumberOfFrees++;
+      Stats.sub(StatAllocated, CommitSize);
+      Stats.sub(StatMapped, CommitSize);
+    }
+    void *Addr = reinterpret_cast<void *>(H->MapBase);
+    const uptr Size = H->MapSize;
+    OpaquePlatformData PlatformData;
+    memcpy(&PlatformData, &H->PlatformData, sizeof(PlatformData));
+    unmap(Addr, Size, UNMAP_ALL, &PlatformData);
+  }
+
+  static uptr getBlockEnd(void *Ptr) {
+    return LargeBlock::getHeader(Ptr)->BlockEnd;
+  }
+
+  static uptr getBlockSize(void *Ptr) {
+    return getBlockEnd(Ptr) - reinterpret_cast<uptr>(Ptr);
+  }
+
+  void printStats() const {
+    Printf("Stats: MapAllocator: allocated %zd times (%zdK), freed %zd times "
+           "(%zdK), remains %zd (%zdK) max %zdM\n",
+           NumberOfAllocs, AllocatedBytes >> 10, NumberOfFrees,
+           FreedBytes >> 10, NumberOfAllocs - NumberOfFrees,
+           (AllocatedBytes - FreedBytes) >> 10, LargestSize >> 20);
+  }
+
+  void disable() { Mutex.lock(); }
+
+  void enable() { Mutex.unlock(); }
+
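+  // Callers are expected to disable() the allocator before iterating, as the
+  // list can otherwise change underneath them (see the SecondaryIterate test).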
+  template <typename F> void iterateOverBlocks(F Callback) const {
+    for (LargeBlock::Header *H = Tail; H != nullptr; H = H->Prev)
+      Callback(reinterpret_cast<uptr>(H) + LargeBlock::getHeaderSize());
+  }
+
+private:
+  StaticSpinMutex Mutex;
+  LargeBlock::Header *Tail;
+  uptr AllocatedBytes;
+  uptr FreedBytes;
+  uptr LargestSize;
+  u32 NumberOfAllocs;
+  u32 NumberOfFrees;
+  LocalStats Stats;
+};
+
+} // namespace scudo
+
+#endif // SCUDO_SECONDARY_H_
Index: lib/scudo/standalone/tests/CMakeLists.txt
===================================================================
--- lib/scudo/standalone/tests/CMakeLists.txt
+++ lib/scudo/standalone/tests/CMakeLists.txt
@@ -57,6 +57,7 @@
   map_test.cc
   mutex_test.cc
   report_test.cc
+  secondary_test.cc
   stats_test.cc
   strings_test.cc
   vector_test.cc
Index: lib/scudo/standalone/tests/bytemap_test.cc
===================================================================
--- lib/scudo/standalone/tests/bytemap_test.cc
+++ lib/scudo/standalone/tests/bytemap_test.cc
@@ -1,4 +1,4 @@
-//===-- bytemap_test.cc------------------------------------------*- C++ -*-===//
+//===-- bytemap_test.cc -----------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
Index: lib/scudo/standalone/tests/map_test.cc
===================================================================
--- lib/scudo/standalone/tests/map_test.cc
+++ lib/scudo/standalone/tests/map_test.cc
@@ -1,4 +1,4 @@
-//===-- map_test.cc----------------------------------------------*- C++ -*-===//
+//===-- map_test.cc ---------------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -16,7 +16,7 @@
 
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::u64 PlatformData = 0;
+  scudo::OpaquePlatformData PlatformData = {};
   void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
   EXPECT_NE(P, nullptr);
   EXPECT_DEATH(memset(P, 0xaa, Size), "");
@@ -25,7 +25,7 @@
 
 TEST(ScudoMapTest, MapUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
-  scudo::u64 PlatformData = 0;
+  scudo::OpaquePlatformData PlatformData = {};
   void *P = scudo::map(nullptr, Size, MappingName, 0, &PlatformData);
   EXPECT_NE(P, nullptr);
   memset(P, 0xaa, Size);
@@ -36,7 +36,7 @@
 TEST(ScudoMapTest, MapWithGuardUnmap) {
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   const scudo::uptr Size = 4 * PageSize;
-  scudo::u64 PlatformData = 0;
+  scudo::OpaquePlatformData PlatformData = {};
   void *P = scudo::map(nullptr, Size + 2 * PageSize, MappingName, MAP_NOACCESS,
                        &PlatformData);
   EXPECT_NE(P, nullptr);
@@ -51,7 +51,7 @@
 TEST(ScudoMapTest, MapGrowUnmap) {
   const scudo::uptr PageSize = scudo::getPageSizeCached();
   const scudo::uptr Size = 4 * PageSize;
-  scudo::u64 PlatformData = 0;
+  scudo::OpaquePlatformData PlatformData = {};
   void *P = scudo::map(nullptr, Size, MappingName, MAP_NOACCESS, &PlatformData);
   EXPECT_NE(P, nullptr);
   void *Q =
Index: lib/scudo/standalone/tests/mutex_test.cc
===================================================================
--- lib/scudo/standalone/tests/mutex_test.cc
+++ lib/scudo/standalone/tests/mutex_test.cc
@@ -1,4 +1,4 @@
-//===-- mutex_test.cc--------------------------------------------*- C++ -*-===//
+//===-- mutex_test.cc -------------------------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
Index: lib/scudo/standalone/tests/secondary_test.cc
===================================================================
--- /dev/null
+++ lib/scudo/standalone/tests/secondary_test.cc
@@ -0,0 +1,135 @@
+//===-- secondary_test.cc ---------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#include "secondary.h"
+
+#include "gtest/gtest.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include <algorithm>
+#include <condition_variable>
+#include <mutex>
+#include <thread>
+#include <vector>
+
+TEST(ScudoSecondaryTest, SecondaryBasic) {
+  scudo::GlobalStats S;
+  S.init();
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(&S);
+  const scudo::uptr Size = 1U << 16;
+  void *P = L->allocate(Size);
+  EXPECT_NE(P, nullptr);
+  memset(P, 'A', Size);
+  L->deallocate(P);
+
+  const scudo::uptr Align = 1U << 16;
+  P = L->allocate(Size + Align, Align);
+  EXPECT_NE(P, nullptr);
+  void *AlignedP = reinterpret_cast<void *>(
+      scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+  memset(AlignedP, 'A', Size);
+  L->deallocate(P);
+
+  std::vector<void *> V;
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate(Size));
+  std::random_shuffle(V.begin(), V.end());
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+// This exercises a variety of combinations of size and alignment for the
+// MapAllocator. The size computations done here mimic the ones done by the
+// combined allocator.
+TEST(ScudoSecondaryTest, SecondaryCombinations) {
+  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
+  constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
+    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
+         AlignLog++) {
+      const scudo::uptr Align = 1U << AlignLog;
+      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
+        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
+          continue;
+        const scudo::uptr UserSize =
+            scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
+        const scudo::uptr Size =
+            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
+        void *P = L->allocate(Size, Align);
+        EXPECT_NE(P, nullptr);
+        void *AlignedP = reinterpret_cast<void *>(
+            scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
+        memset(AlignedP, 0xff, UserSize);
+        L->deallocate(P);
+      }
+    }
+  }
+  L->printStats();
+}
+
+TEST(ScudoSecondaryTest, SecondaryIterate) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  auto Lambda = [V](scudo::uptr Block) {
+    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
+              V.end());
+  };
+  L->disable();
+  L->iterateOverBlocks(Lambda);
+  L->enable();
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+  L->printStats();
+}
+
+std::mutex Mutex;
+std::condition_variable Cv;
+bool Ready = false;
+
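+// Each thread blocks on the condition variable until the main thread flips
+// Ready, so that they all start exercising the allocator concurrently.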
+static void performAllocations(scudo::MapAllocator *L) {
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    while (!Ready)
+      Cv.wait(Lock);
+  }
+  std::vector<void *> V;
+  const scudo::uptr PageSize = scudo::getPageSizeCached();
+  for (scudo::u8 I = 0; I < 32; I++)
+    V.push_back(L->allocate((std::rand() % 16) * PageSize));
+  while (!V.empty()) {
+    L->deallocate(V.back());
+    V.pop_back();
+  }
+}
+
+TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
+  scudo::MapAllocator *L = new scudo::MapAllocator;
+  L->init(nullptr);
+  std::thread Threads[10];
+  for (scudo::uptr I = 0; I < 10; I++)
+    Threads[I] = std::thread(performAllocations, L);
+  {
+    std::unique_lock<std::mutex> Lock(Mutex);
+    Ready = true;
+    Cv.notify_all();
+  }
+  for (auto &T : Threads)
+    T.join();
+  L->printStats();
+}