
Commit aeb3826

Author: Kostya Kortchinsky
Committed: Jul 11, 2019

[scudo][standalone] Merge Spin & Blocking mutex into a Hybrid one
Summary:
We ran into a problem on Fuchsia where yielding threads would never be deboosted, ultimately resulting in several threads spinning on the same TSD with no possibility for another thread to be scheduled, deadlocking the process. While this was fixed in Zircon, it led to discussions about whether spinning without a break condition was a good decision, and we settled on a new hybrid model that spins for a while and then blocks. The number of spin iterations is currently mostly arbitrary (based on sanitizer_common values), but it can be tuned in the future.

Since we are touching common.h, we also use this change as a vehicle for an Android optimization: the page size is fixed in Bionic, so use a fixed value too.

Reviewers: morehouse, hctim, eugenis, dvyukov, vitalybuka

Reviewed By: hctim

Subscribers: srhines, delcypher, jfb, #sanitizers, llvm-commits

Tags: #llvm, #sanitizers

Differential Revision: https://reviews.llvm.org/D64358

llvm-svn: 365790
1 parent: 96dff91

17 files changed: +144, -158 lines
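
Note: as a rough illustration of the hybrid model described in the summary, a bounded spin followed by a blocking acquisition, here is a minimal sketch built on std::mutex rather than scudo's own primitives. The class name and the retry bound are invented for the example; the real HybridMutex (see mutex.h below) spins with yieldProcessor() and blocks on a futex (Linux) or sync_mutex_t (Fuchsia).

#include <mutex>

// Sketch only, not scudo code: spin a bounded number of times, then fall
// back to a blocking acquisition so a descheduled owner can still run.
class HybridLockSketch {
public:
  bool tryLock() { return M.try_lock(); }
  void lock() {
    for (int I = 0; I < 10; I++) { // arbitrary bound, like NumberOfTries
      if (tryLock())
        return;
      // A real implementation pauses the CPU here (e.g. yieldProcessor()).
    }
    M.lock(); // blocking slow path
  }
  void unlock() { M.unlock(); }

private:
  std::mutex M;
};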
 

Diff for: compiler-rt/lib/scudo/standalone/atomic_helpers.h (+8)

@@ -126,6 +126,14 @@ INLINE void atomic_store_relaxed(volatile T *A, typename T::Type V) {
   atomic_store(A, V, memory_order_relaxed);
 }
 
+template <typename T>
+INLINE typename T::Type atomic_compare_exchange(volatile T *A,
+                                                typename T::Type Cmp,
+                                                typename T::Type Xchg) {
+  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
+  return Cmp;
+}
+
 } // namespace scudo
 
 #endif // SCUDO_ATOMIC_H_
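
The helper added above returns the value that was actually observed in memory, so a caller learns in one operation whether the exchange happened (observed == expected) and, if not, what the current state is. A rough std::atomic equivalent, for illustration only (compareExchange and the 0/1 lock states are assumptions of the sketch, not scudo code):

#include <atomic>
#include <cstdint>

// Returns the value seen in memory; if it equals Cmp, the exchange happened.
static uint32_t compareExchange(std::atomic<uint32_t> &A, uint32_t Cmp,
                                uint32_t Xchg) {
  // On failure, compare_exchange_strong writes the observed value into Cmp.
  A.compare_exchange_strong(Cmp, Xchg, std::memory_order_acquire);
  return Cmp;
}

// Usage in the spirit of HybridMutex::tryLock(): 0 = unlocked, 1 = locked.
// bool Acquired = compareExchange(State, 0, 1) == 0;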

Diff for: compiler-rt/lib/scudo/standalone/bytemap.h (+3 -3)

@@ -45,8 +45,8 @@ template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
         map(nullptr, sizeof(atomic_uptr) * Level1Size, "scudo:bytemap"));
   }
   void init() {
-    initLinkerInitialized();
     Mutex.init();
+    initLinkerInitialized();
   }
 
   void reset() {
@@ -92,7 +92,7 @@ template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
   u8 *getOrCreate(uptr Index) {
     u8 *Res = get(Index);
     if (!Res) {
-      SpinMutexLock L(&Mutex);
+      ScopedLock L(Mutex);
       if (!(Res = get(Index))) {
         Res = reinterpret_cast<u8 *>(map(nullptr, Level2Size, "scudo:bytemap"));
         atomic_store(&Level1Map[Index], reinterpret_cast<uptr>(Res),
@@ -103,7 +103,7 @@ template <uptr Level1Size, uptr Level2Size> class TwoLevelByteMap {
   }
 
   atomic_uptr *Level1Map;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 };
 
 } // namespace scudo
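
getOrCreate() above keeps its double-checked shape: a lock-free read on the fast path, then a re-check under the (now hybrid) mutex so only one thread maps the second-level array. The same pattern in a self-contained sketch, with illustrative names and std types instead of scudo's:

#include <atomic>
#include <mutex>

static std::atomic<int *> Slot{nullptr};
static std::mutex SlotMutex;

int *getOrCreateSlot() {
  int *P = Slot.load(std::memory_order_acquire); // fast path, no lock
  if (!P) {
    std::lock_guard<std::mutex> L(SlotMutex);
    P = Slot.load(std::memory_order_relaxed);    // re-check under the lock
    if (!P) {
      P = new int(0);                            // scudo maps a Level2 array
      Slot.store(P, std::memory_order_release);  // publish for other readers
    }
  }
  return P;
}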

Diff for: compiler-rt/lib/scudo/standalone/common.h (+3 -2)

@@ -115,11 +115,12 @@ INLINE void yieldProcessor(u8 Count) {
 
 // Platform specific functions.
 
-void yieldPlatform();
-
 extern uptr PageSizeCached;
 uptr getPageSizeSlow();
 INLINE uptr getPageSizeCached() {
+  // Bionic uses a hardcoded value.
+  if (SCUDO_ANDROID)
+    return 4096U;
   if (LIKELY(PageSizeCached))
     return PageSizeCached;
   return getPageSizeSlow();
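
getPageSizeCached() feeds size and alignment rounding throughout the allocator; with the Android branch above, that rounding now uses a compile-time constant on Bionic. A hedged sketch of the typical rounding computation (roundUpToPage is an illustrative helper, not a scudo function name):

#include <cstdint>

using uptr = uintptr_t;

// PageSize must be a power of two, which is true of 4096 on Bionic.
inline uptr roundUpToPage(uptr Size, uptr PageSize) {
  return (Size + PageSize - 1) & ~(PageSize - 1);
}

// e.g. roundUpToPage(5000, 4096) == 8192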

Diff for: compiler-rt/lib/scudo/standalone/fuchsia.cc (+9 -12)

@@ -23,11 +23,6 @@
 
 namespace scudo {
 
-void yieldPlatform() {
-  const zx_status_t Status = _zx_nanosleep(0);
-  CHECK_EQ(Status, ZX_OK);
-}
-
 uptr getPageSize() { return PAGE_SIZE; }
 
 void NORETURN die() { __builtin_trap(); }
@@ -155,18 +150,20 @@ const char *getEnv(const char *Name) { return getenv(Name); }
 // Note: we need to flag these methods with __TA_NO_THREAD_SAFETY_ANALYSIS
 // because the Fuchsia implementation of sync_mutex_t has clang thread safety
 // annotations. Were we to apply proper capability annotations to the top level
-// BlockingMutex class itself, they would not be needed. As it stands, the
+// HybridMutex class itself, they would not be needed. As it stands, the
 // thread analysis thinks that we are locking the mutex and accidentally leaving
 // it locked on the way out.
-void BlockingMutex::lock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+bool HybridMutex::tryLock() __TA_NO_THREAD_SAFETY_ANALYSIS {
   // Size and alignment must be compatible between both types.
-  COMPILER_CHECK(sizeof(sync_mutex_t) <= sizeof(OpaqueStorage));
-  COMPILER_CHECK(!(alignof(decltype(OpaqueStorage)) % alignof(sync_mutex_t)));
-  sync_mutex_lock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+  return sync_mutex_trylock(&M) == ZX_OK;
+}
+
+void HybridMutex::lockSlow() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_lock(&M);
 }
 
-void BlockingMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
-  sync_mutex_unlock(reinterpret_cast<sync_mutex_t *>(OpaqueStorage));
+void HybridMutex::unlock() __TA_NO_THREAD_SAFETY_ANALYSIS {
+  sync_mutex_unlock(&M);
 }
 
 u64 getMonotonicTime() { return _zx_clock_get_monotonic(); }

Diff for: compiler-rt/lib/scudo/standalone/linux.cc (+33 -20)

@@ -37,24 +37,25 @@
 
 namespace scudo {
 
-void yieldPlatform() { sched_yield(); }
-
 uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
 
 void NORETURN die() { abort(); }
 
 void *map(void *Addr, uptr Size, UNUSED const char *Name, uptr Flags,
           UNUSED MapPlatformData *Data) {
   int MmapFlags = MAP_PRIVATE | MAP_ANON;
-  if (Flags & MAP_NOACCESS)
+  int MmapProt;
+  if (Flags & MAP_NOACCESS) {
     MmapFlags |= MAP_NORESERVE;
+    MmapProt = PROT_NONE;
+  } else {
+    MmapProt = PROT_READ | PROT_WRITE;
+  }
   if (Addr) {
     // Currently no scenario for a noaccess mapping with a fixed address.
     DCHECK_EQ(Flags & MAP_NOACCESS, 0);
     MmapFlags |= MAP_FIXED;
   }
-  const int MmapProt =
-      (Flags & MAP_NOACCESS) ? PROT_NONE : PROT_READ | PROT_WRITE;
   void *P = mmap(Addr, Size, MmapProt, MmapFlags, -1, 0);
   if (P == MAP_FAILED) {
     if (!(Flags & MAP_ALLOWNOMEM) || errno != ENOMEM)
@@ -84,22 +85,34 @@ void releasePagesToOS(uptr BaseAddress, uptr Offset, uptr Size,
 // Calling getenv should be fine (c)(tm) at any time.
 const char *getEnv(const char *Name) { return getenv(Name); }
 
-void BlockingMutex::lock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  if (atomic_exchange(M, MtxLocked, memory_order_acquire) == MtxUnlocked)
+namespace {
+enum State : u32 { Unlocked = 0, Locked = 1, Sleeping = 2 };
+}
+
+bool HybridMutex::tryLock() {
+  return atomic_compare_exchange(&M, Unlocked, Locked) == Unlocked;
+}
+
+// The following is based on https://akkadia.org/drepper/futex.pdf.
+void HybridMutex::lockSlow() {
+  u32 V = atomic_compare_exchange(&M, Unlocked, Locked);
+  if (V == Unlocked)
     return;
-  while (atomic_exchange(M, MtxSleeping, memory_order_acquire) != MtxUnlocked)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAIT_PRIVATE, MtxSleeping, nullptr, nullptr, 0);
+  if (V != Sleeping)
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  while (V != Unlocked) {
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAIT_PRIVATE, Sleeping,
+            nullptr, nullptr, 0);
+    V = atomic_exchange(&M, Sleeping, memory_order_acquire);
+  }
 }
 
-void BlockingMutex::unlock() {
-  atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-  const u32 V = atomic_exchange(M, MtxUnlocked, memory_order_release);
-  DCHECK_NE(V, MtxUnlocked);
-  if (V == MtxSleeping)
-    syscall(SYS_futex, reinterpret_cast<uptr>(OpaqueStorage),
-            FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
+void HybridMutex::unlock() {
+  if (atomic_fetch_sub(&M, 1U, memory_order_release) != Locked) {
+    atomic_store(&M, Unlocked, memory_order_release);
+    syscall(SYS_futex, reinterpret_cast<uptr>(&M), FUTEX_WAKE_PRIVATE, 1,
+            nullptr, nullptr, 0);
+  }
 }
 
 u64 getMonotonicTime() {
@@ -141,8 +154,8 @@ bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
 }
 
 void outputRaw(const char *Buffer) {
-  static StaticSpinMutex Mutex;
-  SpinMutexLock L(&Mutex);
+  static HybridMutex Mutex;
+  ScopedLock L(Mutex);
   write(2, Buffer, strlen(Buffer));
 }
 
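
The Linux slow path above is the three-state futex mutex from the referenced Drepper paper: 0 = unlocked, 1 = locked with no waiters, 2 = locked with possible waiters. A standalone sketch of the same state machine with std::atomic and raw futex syscalls; this is a simplified illustration, not the scudo code itself:

#include <atomic>
#include <cstdint>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

class FutexMutexSketch {
public:
  void lock() {
    uint32_t C = 0;
    // Fast path: 0 -> 1 (locked, no waiters).
    if (Word.compare_exchange_strong(C, 1, std::memory_order_acquire))
      return;
    // Slow path: mark the word contended (2) and sleep until we acquire it.
    if (C != 2)
      C = Word.exchange(2, std::memory_order_acquire);
    while (C != 0) {
      syscall(SYS_futex, &Word, FUTEX_WAIT_PRIVATE, 2, nullptr, nullptr, 0);
      C = Word.exchange(2, std::memory_order_acquire);
    }
  }

  void unlock() {
    // 1 -> 0 means nobody waited; 2 -> 1 means someone may be sleeping,
    // so reset to 0 and wake exactly one waiter.
    if (Word.fetch_sub(1, std::memory_order_release) != 1) {
      Word.store(0, std::memory_order_release);
      syscall(SYS_futex, &Word, FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
    }
  }

private:
  std::atomic<uint32_t> Word{0};
};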

Diff for: compiler-rt/lib/scudo/standalone/mutex.h (+36 -56)

@@ -12,82 +12,62 @@
 #include "atomic_helpers.h"
 #include "common.h"
 
+#include <string.h>
+
+#if SCUDO_FUCHSIA
+#include <lib/sync/mutex.h> // for sync_mutex_t
+#endif
+
 namespace scudo {
 
-class StaticSpinMutex {
+class HybridMutex {
 public:
-  void init() { atomic_store_relaxed(&State, 0); }
-
-  void lock() {
+  void init() { memset(this, 0, sizeof(*this)); }
+  bool tryLock();
+  NOINLINE void lock() {
     if (tryLock())
       return;
-    lockSlow();
-  }
-
-  bool tryLock() {
-    return atomic_exchange(&State, 1, memory_order_acquire) == 0;
-  }
-
-  void unlock() { atomic_store(&State, 0, memory_order_release); }
-
-  void checkLocked() { CHECK_EQ(atomic_load_relaxed(&State), 1); }
-
-private:
-  atomic_u8 State;
-
-  void NOINLINE lockSlow() {
-    for (u32 I = 0;; I++) {
-      if (I < 10)
-        yieldProcessor(10);
-      else
-        yieldPlatform();
-      if (atomic_load_relaxed(&State) == 0 &&
-          atomic_exchange(&State, 1, memory_order_acquire) == 0)
+    // The compiler may try to fully unroll the loop, ending up in a
+    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+    // is large, ugly and unneeded, a compact loop is better for our purpose
+    // here. Use a pragma to tell the compiler not to unroll the loop.
+#ifdef __clang__
+#pragma nounroll
+#endif
+    for (u8 I = 0U; I < NumberOfTries; I++) {
+      yieldProcessor(NumberOfYields);
+      if (tryLock())
        return;
    }
+    lockSlow();
   }
-};
-
-class SpinMutex : public StaticSpinMutex {
-public:
-  SpinMutex() { init(); }
+  void unlock();
 
 private:
-  SpinMutex(const SpinMutex &) = delete;
-  void operator=(const SpinMutex &) = delete;
-};
+  static constexpr u8 NumberOfTries = 10U;
+  static constexpr u8 NumberOfYields = 10U;
 
-class BlockingMutex {
-public:
-  explicit constexpr BlockingMutex(LinkerInitialized) : OpaqueStorage{} {}
-  BlockingMutex() { memset(this, 0, sizeof(*this)); }
-  void lock();
-  void unlock();
-  void checkLocked() {
-    atomic_u32 *M = reinterpret_cast<atomic_u32 *>(&OpaqueStorage);
-    CHECK_NE(MtxUnlocked, atomic_load_relaxed(M));
-  }
+#if SCUDO_LINUX
+  atomic_u32 M;
+#elif SCUDO_FUCHSIA
+  sync_mutex_t M;
+#endif
 
-private:
-  enum MutexState { MtxUnlocked = 0, MtxLocked = 1, MtxSleeping = 2 };
-  uptr OpaqueStorage[1];
+  void lockSlow();
 };
 
-template <typename MutexType> class GenericScopedLock {
+class ScopedLock {
 public:
-  explicit GenericScopedLock(MutexType *M) : Mutex(M) { Mutex->lock(); }
-  ~GenericScopedLock() { Mutex->unlock(); }
+  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
+  ~ScopedLock() { Mutex.unlock(); }
 
 private:
-  MutexType *Mutex;
+  HybridMutex &Mutex;
 
-  GenericScopedLock(const GenericScopedLock &) = delete;
-  void operator=(const GenericScopedLock &) = delete;
+  ScopedLock(const ScopedLock &) = delete;
+  void operator=(const ScopedLock &) = delete;
 };
 
-typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
-typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
-
 } // namespace scudo
 
 #endif // SCUDO_MUTEX_H_
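
With the header above, every call site looks the same on both platforms: hold a HybridMutex member (zero-initialized or explicitly init()-ed) and take it RAII-style with ScopedLock, which replaces both SpinMutexLock and BlockingMutexLock. A minimal usage sketch; FreeListSketch is invented for illustration and assumes scudo's internal headers:

#include "mutex.h"

namespace scudo {

struct FreeListSketch {
  void push(void *Ptr) {
    ScopedLock L(Mutex); // brief spin, then block under contention
    if (Size < 64U)
      Blocks[Size++] = Ptr;
  } // ScopedLock's destructor unlocks on every path, including early returns

  void *pop() {
    ScopedLock L(Mutex);
    return Size ? Blocks[--Size] : nullptr;
  }

  HybridMutex Mutex; // zero-initialization is a valid unlocked state
  void *Blocks[64] = {};
  uptr Size = 0;
};

} // namespace scudo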

Diff for: compiler-rt/lib/scudo/standalone/primary32.h (+7 -7)

@@ -97,7 +97,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
     if (B)
       Sci->FreeList.pop_front();
@@ -115,7 +115,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     DCHECK_LT(ClassId, NumClasses);
     DCHECK_GT(B->getCount(), 0);
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
-    BlockingMutexLock L(&Sci->Mutex);
+    ScopedLock L(Sci->Mutex);
     Sci->FreeList.push_front(B);
     Sci->Stats.PushedBlocks += B->getCount();
     if (Sci->CanRelease)
@@ -164,7 +164,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       SizeClassInfo *Sci = getSizeClassInfo(I);
-      BlockingMutexLock L(&Sci->Mutex);
+      ScopedLock L(Sci->Mutex);
       releaseToOSMaybe(Sci, I, /*Force=*/true);
     }
   }
@@ -192,7 +192,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) SizeClassInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     SizeClassStats Stats;
     bool CanRelease;
@@ -217,7 +217,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     const uptr MapEnd = MapBase + MapSize;
     uptr Region = MapBase;
     if (isAligned(Region, RegionSize)) {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
      if (NumberOfStashedRegions < MaxStashedRegions)
         RegionsStash[NumberOfStashedRegions++] = MapBase + RegionSize;
       else
@@ -237,7 +237,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
     DCHECK_LT(ClassId, NumClasses);
     uptr Region = 0;
     {
-      SpinMutexLock L(&RegionsStashMutex);
+      ScopedLock L(RegionsStashMutex);
       if (NumberOfStashedRegions > 0)
         Region = RegionsStash[--NumberOfStashedRegions];
     }
@@ -389,7 +389,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator32 {
   // Unless several threads request regions simultaneously from different size
   // classes, the stash rarely contains more than 1 entry.
   static constexpr uptr MaxStashedRegions = 4;
-  StaticSpinMutex RegionsStashMutex;
+  HybridMutex RegionsStashMutex;
   uptr NumberOfStashedRegions;
   uptr RegionsStash[MaxStashedRegions];
 };

Diff for: compiler-rt/lib/scudo/standalone/primary64.h (+4 -4)

@@ -100,7 +100,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
   TransferBatch *popBatch(CacheT *C, uptr ClassId) {
     DCHECK_LT(ClassId, NumClasses);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
     if (B)
       Region->FreeList.pop_front();
@@ -117,7 +117,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
   void pushBatch(uptr ClassId, TransferBatch *B) {
     DCHECK_GT(B->getCount(), 0);
     RegionInfo *Region = getRegionInfo(ClassId);
-    BlockingMutexLock L(&Region->Mutex);
+    ScopedLock L(Region->Mutex);
     Region->FreeList.push_front(B);
     Region->Stats.PushedBlocks += B->getCount();
     if (Region->CanRelease)
@@ -168,7 +168,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
   void releaseToOS() {
     for (uptr I = 1; I < NumClasses; I++) {
       RegionInfo *Region = getRegionInfo(I);
-      BlockingMutexLock L(&Region->Mutex);
+      ScopedLock L(Region->Mutex);
       releaseToOSMaybe(Region, I, /*Force=*/true);
     }
   }
@@ -194,7 +194,7 @@ template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
   };
 
   struct ALIGNED(SCUDO_CACHE_LINE_SIZE) RegionInfo {
-    BlockingMutex Mutex;
+    HybridMutex Mutex;
     IntrusiveList<TransferBatch> FreeList;
     RegionStats Stats;
     bool CanRelease;

Diff for: compiler-rt/lib/scudo/standalone/quarantine.h (+5 -5)

@@ -202,7 +202,7 @@ template <typename Callback, typename Node> class GlobalQuarantine {
 
   void NOINLINE drain(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     if (Cache.getSize() > getMaxSize() && RecyleMutex.tryLock())
@@ -211,7 +211,7 @@ template <typename Callback, typename Node> class GlobalQuarantine {
 
   void NOINLINE drainAndRecycle(CacheT *C, Callback Cb) {
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       Cache.transfer(C);
     }
     RecyleMutex.lock();
@@ -227,9 +227,9 @@ template <typename Callback, typename Node> class GlobalQuarantine {
 
 private:
   // Read-only data.
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex CacheMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex CacheMutex;
   CacheT Cache;
-  alignas(SCUDO_CACHE_LINE_SIZE) StaticSpinMutex RecyleMutex;
+  alignas(SCUDO_CACHE_LINE_SIZE) HybridMutex RecyleMutex;
   atomic_uptr MinSize;
   atomic_uptr MaxSize;
   alignas(SCUDO_CACHE_LINE_SIZE) atomic_uptr MaxCacheSize;
@@ -238,7 +238,7 @@ template <typename Callback, typename Node> class GlobalQuarantine {
     CacheT Tmp;
     Tmp.init();
     {
-      SpinMutexLock L(&CacheMutex);
+      ScopedLock L(CacheMutex);
       // Go over the batches and merge partially filled ones to
       // save some memory, otherwise batches themselves (since the memory used
       // by them is counted against quarantine limit) can overcome the actual

Diff for: compiler-rt/lib/scudo/standalone/secondary.cc (+2 -2)

@@ -72,7 +72,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
   H->BlockEnd = CommitBase + CommitSize;
   H->Data = Data;
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (!Tail) {
       Tail = H;
     } else {
@@ -95,7 +95,7 @@ void *MapAllocator::allocate(uptr Size, uptr AlignmentHint, uptr *BlockEnd) {
 void MapAllocator::deallocate(void *Ptr) {
   LargeBlock::Header *H = LargeBlock::getHeader(Ptr);
   {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     LargeBlock::Header *Prev = H->Prev;
     LargeBlock::Header *Next = H->Next;
     if (Prev) {

Diff for: compiler-rt/lib/scudo/standalone/secondary.h (+1 -1)

@@ -82,7 +82,7 @@ class MapAllocator {
   }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   LargeBlock::Header *Tail;
   uptr AllocatedBytes;
   uptr FreedBytes;

Diff for: compiler-rt/lib/scudo/standalone/stats.h (+4 -4)

@@ -65,15 +65,15 @@ class GlobalStats : public LocalStats {
   }
 
   void link(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Next = Next;
     S->Prev = this;
     Next->Prev = S;
     Next = S;
   }
 
   void unlink(LocalStats *S) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     S->Prev->Next = S->Next;
     S->Next->Prev = S->Prev;
     for (uptr I = 0; I < StatCount; I++)
@@ -82,7 +82,7 @@ class GlobalStats : public LocalStats {
 
   void get(uptr *S) const {
     memset(S, 0, StatCount * sizeof(uptr));
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     const LocalStats *Stats = this;
     for (;;) {
       for (uptr I = 0; I < StatCount; I++)
@@ -97,7 +97,7 @@ class GlobalStats : public LocalStats {
   }
 
 private:
-  mutable StaticSpinMutex Mutex;
+  mutable HybridMutex Mutex;
 };
 
 } // namespace scudo

Diff for: compiler-rt/lib/scudo/standalone/tests/map_test.cc (+6)

@@ -11,9 +11,15 @@
 #include "gtest/gtest.h"
 
 #include <string.h>
+#include <unistd.h>
 
 static const char *MappingName = "scudo:test";
 
+TEST(ScudoMapTest, PageSize) {
+  EXPECT_EQ(scudo::getPageSizeCached(),
+            static_cast<scudo::uptr>(getpagesize()));
+}
+
 TEST(ScudoMapTest, MapNoAccessUnmap) {
   const scudo::uptr Size = 4 * scudo::getPageSizeCached();
   scudo::MapPlatformData Data = {};

Diff for: compiler-rt/lib/scudo/standalone/tests/mutex_test.cc (+18 -37)

@@ -12,15 +12,15 @@
 
 #include <string.h>
 
-template <typename MutexType> class TestData {
+class TestData {
 public:
-  explicit TestData(MutexType *M) : Mutex(M) {
+  explicit TestData(scudo::HybridMutex &M) : Mutex(M) {
     for (scudo::u32 I = 0; I < Size; I++)
       Data[I] = 0;
   }
 
   void write() {
-    Lock L(Mutex);
+    scudo::ScopedLock L(Mutex);
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
@@ -29,14 +29,14 @@ template <typename MutexType> class TestData {
   }
 
   void tryWrite() {
-    if (!Mutex->tryLock())
+    if (!Mutex.tryLock())
       return;
     T V0 = Data[0];
     for (scudo::u32 I = 0; I < Size; I++) {
       EXPECT_EQ(Data[I], V0);
       Data[I]++;
     }
-    Mutex->unlock();
+    Mutex.unlock();
   }
 
   void backoff() {
@@ -48,10 +48,9 @@ template <typename MutexType> class TestData {
   }
 
 private:
-  typedef scudo::GenericScopedLock<MutexType> Lock;
   static const scudo::u32 Size = 64U;
   typedef scudo::u64 T;
-  MutexType *Mutex;
+  scudo::HybridMutex &Mutex;
   ALIGNED(SCUDO_CACHE_LINE_SIZE) T Data[Size];
 };
 
@@ -62,60 +61,42 @@ const scudo::u32 NumberOfIterations = 4 * 1024;
 const scudo::u32 NumberOfIterations = 16 * 1024;
 #endif
 
-template <typename MutexType> static void *lockThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *lockThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
   for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->write();
     Data->backoff();
   }
   return 0;
 }
 
-template <typename MutexType> static void *tryThread(void *Param) {
-  TestData<MutexType> *Data = reinterpret_cast<TestData<MutexType> *>(Param);
+static void *tryThread(void *Param) {
+  TestData *Data = reinterpret_cast<TestData *>(Param);
  for (scudo::u32 I = 0; I < NumberOfIterations; I++) {
     Data->tryWrite();
     Data->backoff();
   }
   return 0;
 }
 
-template <typename MutexType> static void checkLocked(MutexType *M) {
-  scudo::GenericScopedLock<MutexType> L(M);
-  M->checkLocked();
-}
-
-TEST(ScudoMutexTest, SpinMutex) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, Mutex) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::SpinMutex>, &Data);
+    pthread_create(&Threads[I], 0, lockThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
 }
 
-TEST(ScudoMutexTest, SpinMutexTry) {
-  scudo::SpinMutex M;
+TEST(ScudoMutexTest, MutexTry) {
+  scudo::HybridMutex M;
   M.init();
-  TestData<scudo::SpinMutex> Data(&M);
-  pthread_t Threads[NumberOfThreads];
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, tryThread<scudo::SpinMutex>, &Data);
-  for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_join(Threads[I], 0);
-}
-
-TEST(ScudoMutexTest, BlockingMutex) {
-  scudo::u64 MutexMemory[1024] = {};
-  scudo::BlockingMutex *M =
-      new (MutexMemory) scudo::BlockingMutex(scudo::LINKER_INITIALIZED);
-  TestData<scudo::BlockingMutex> Data(M);
+  TestData Data(M);
   pthread_t Threads[NumberOfThreads];
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
-    pthread_create(&Threads[I], 0, lockThread<scudo::BlockingMutex>, &Data);
+    pthread_create(&Threads[I], 0, tryThread, &Data);
   for (scudo::u32 I = 0; I < NumberOfThreads; I++)
     pthread_join(Threads[I], 0);
-  checkLocked(M);
 }

Diff for: compiler-rt/lib/scudo/standalone/tsd.h (+1 -1)

@@ -57,7 +57,7 @@ template <class Allocator> struct ALIGNED(SCUDO_CACHE_LINE_SIZE) TSD {
   INLINE uptr getPrecedence() { return atomic_load_relaxed(&Precedence); }
 
 private:
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   atomic_uptr Precedence;
 };
 

Diff for: compiler-rt/lib/scudo/standalone/tsd_exclusive.h (+2 -2)

@@ -60,7 +60,7 @@ template <class Allocator> struct TSDRegistryExT {
 
 private:
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -82,7 +82,7 @@ template <class Allocator> struct TSDRegistryExT {
   pthread_key_t PThreadKey;
   bool Initialized;
   TSD<Allocator> *FallbackTSD;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
   static THREADLOCAL ThreadState State;
   static THREADLOCAL TSD<Allocator> ThreadTSD;
 

Diff for: compiler-rt/lib/scudo/standalone/tsd_shared.h (+2 -2)

@@ -94,7 +94,7 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
   }
 
   void initOnceMaybe(Allocator *Instance) {
-    SpinMutexLock L(&Mutex);
+    ScopedLock L(Mutex);
     if (Initialized)
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
@@ -152,7 +152,7 @@ template <class Allocator, u32 MaxTSDCount> struct TSDRegistrySharedT {
   u32 NumberOfCoPrimes;
   u32 CoPrimes[MaxTSDCount];
   bool Initialized;
-  StaticSpinMutex Mutex;
+  HybridMutex Mutex;
 #if SCUDO_LINUX && !SCUDO_ANDROID
   static THREADLOCAL TSD<Allocator> *ThreadTSD;
 #endif
