|
| 1 | +//===-- stats.h -------------------------------------------------*- C++ -*-===// |
| 2 | +// |
| 3 | +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
| 4 | +// See https://llvm.org/LICENSE.txt for license information. |
| 5 | +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
| 6 | +// |
| 7 | +//===----------------------------------------------------------------------===// |
| 8 | + |
| 9 | +#ifndef SCUDO_STATS_H_ |
| 10 | +#define SCUDO_STATS_H_ |
| 11 | + |
| 12 | +#include "atomic_helpers.h" |
| 13 | +#include "mutex.h" |
| 14 | + |
| 15 | +#include <string.h> |
| 16 | + |
| 17 | +namespace scudo { |
| 18 | + |
// Memory allocator statistics. StatCount is not a real counter: it is the
// number of StatType values, used to size the per-thread counter arrays.
enum StatType { StatAllocated, StatMapped, StatCount };

// A plain (non-atomic) array holding one value per StatType; presumably used
// by callers to receive aggregated snapshots — not referenced in this header.
typedef uptr StatCounters[StatCount];
| 23 | + |
// Per-thread stats, live in the per-thread cache. We use atomics so that the
// numbers themselves are consistent. But we don't use atomic_{add|sub} or a
// lock, because those are expensive operations, and we only care for the stats
// to be "somewhat" correct: e.g. if we call GlobalStats::get while a thread is
// LocalStats::add'ing, this is OK, we will still get a meaningful number.
| 29 | +class LocalStats { |
| 30 | +public: |
| 31 | + void initLinkerInitialized() {} |
| 32 | + void init() { memset(this, 0, sizeof(*this)); } |
| 33 | + |
| 34 | + void add(StatType I, uptr V) { |
| 35 | + V += atomic_load_relaxed(&StatsArray[I]); |
| 36 | + atomic_store_relaxed(&StatsArray[I], V); |
| 37 | + } |
| 38 | + |
| 39 | + void sub(StatType I, uptr V) { |
| 40 | + V = atomic_load_relaxed(&StatsArray[I]) - V; |
| 41 | + atomic_store_relaxed(&StatsArray[I], V); |
| 42 | + } |
| 43 | + |
| 44 | + void set(StatType I, uptr V) { atomic_store_relaxed(&StatsArray[I], V); } |
| 45 | + |
| 46 | + uptr get(StatType I) const { return atomic_load_relaxed(&StatsArray[I]); } |
| 47 | + |
| 48 | +private: |
| 49 | + friend class GlobalStats; |
| 50 | + atomic_uptr StatsArray[StatCount]; |
| 51 | + LocalStats *Next; |
| 52 | + LocalStats *Prev; |
| 53 | +}; |
| 54 | + |
| 55 | +// Global stats, used for aggregation and querying. |
| 56 | +class GlobalStats : public LocalStats { |
| 57 | +public: |
| 58 | + void initLinkerInitialized() { |
| 59 | + Next = this; |
| 60 | + Prev = this; |
| 61 | + } |
| 62 | + void init() { |
| 63 | + memset(this, 0, sizeof(*this)); |
| 64 | + initLinkerInitialized(); |
| 65 | + } |
| 66 | + |
| 67 | + void link(LocalStats *S) { |
| 68 | + SpinMutexLock L(&Mutex); |
| 69 | + S->Next = Next; |
| 70 | + S->Prev = this; |
| 71 | + Next->Prev = S; |
| 72 | + Next = S; |
| 73 | + } |
| 74 | + |
| 75 | + void unlink(LocalStats *S) { |
| 76 | + SpinMutexLock L(&Mutex); |
| 77 | + S->Prev->Next = S->Next; |
| 78 | + S->Next->Prev = S->Prev; |
| 79 | + for (uptr I = 0; I < StatCount; I++) |
| 80 | + add(static_cast<StatType>(I), S->get(static_cast<StatType>(I))); |
| 81 | + } |
| 82 | + |
| 83 | + void get(uptr *S) const { |
| 84 | + memset(S, 0, StatCount * sizeof(uptr)); |
| 85 | + SpinMutexLock L(&Mutex); |
| 86 | + const LocalStats *Stats = this; |
| 87 | + for (;;) { |
| 88 | + for (uptr I = 0; I < StatCount; I++) |
| 89 | + S[I] += Stats->get(static_cast<StatType>(I)); |
| 90 | + Stats = Stats->Next; |
| 91 | + if (Stats == this) |
| 92 | + break; |
| 93 | + } |
| 94 | + // All stats must be non-negative. |
| 95 | + for (uptr I = 0; I < StatCount; I++) |
| 96 | + S[I] = static_cast<sptr>(S[I]) >= 0 ? S[I] : 0; |
| 97 | + } |
| 98 | + |
| 99 | +private: |
| 100 | + mutable StaticSpinMutex Mutex; |
| 101 | +}; |
| 102 | + |
| 103 | +} // namespace scudo |
| 104 | + |
| 105 | +#endif // SCUDO_STATS_H_ |
0 commit comments