Index: lib/sanitizer_common/sanitizer_allocator_local_cache.h
===================================================================
--- lib/sanitizer_common/sanitizer_allocator_local_cache.h
+++ lib/sanitizer_common/sanitizer_allocator_local_cache.h
@@ -19,8 +19,7 @@
 // object per thread in TLS, is has to be POD.
 template <class SizeClassAllocator>
 struct SizeClassAllocatorLocalCache
-    : SizeClassAllocator::AllocatorCache {
-};
+    : SizeClassAllocator::AllocatorCache {};
 
 // Cache used by SizeClassAllocator64.
 template <class SizeClassAllocator>
@@ -46,13 +45,12 @@
     if (UNLIKELY(c->count == 0)) {
       if (UNLIKELY(!Refill(c, allocator, class_id)))
         return nullptr;
+      DCHECK_GT(c->count, 0);
     }
-    stats_.Add(AllocatorStatAllocated, c->class_size);
-    CHECK_GT(c->count, 0);
     CompactPtrT chunk = c->chunks[--c->count];
-    void *res = reinterpret_cast<void *>(allocator->CompactPtrToPointer(
+    stats_.Add(AllocatorStatAllocated, c->class_size);
+    return reinterpret_cast<void *>(allocator->CompactPtrToPointer(
         allocator->GetRegionBeginBySizeClass(class_id), chunk));
-    return res;
   }
 
   void Deallocate(SizeClassAllocator *allocator, uptr class_id, void *p) {
@@ -60,16 +58,15 @@
     CHECK_LT(class_id, kNumClasses);
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
-    InitCache();
     PerClass *c = &per_class_[class_id];
-    stats_.Sub(AllocatorStatAllocated, c->class_size);
-    CHECK_NE(c->max_count, 0UL);
+    InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
       Drain(c, allocator, class_id, c->max_count / 2);
     CompactPtrT chunk = allocator->PointerToCompactPtr(
         allocator->GetRegionBeginBySizeClass(class_id),
         reinterpret_cast<uptr>(p));
     c->chunks[c->count++] = chunk;
+    stats_.Sub(AllocatorStatAllocated, c->class_size);
   }
 
   void Drain(SizeClassAllocator *allocator) {
@@ -94,20 +91,21 @@
   PerClass per_class_[kNumClasses];
   AllocatorStats stats_;
 
-  void InitCache() {
-    if (LIKELY(per_class_[1].max_count))
+  void InitCache(PerClass *c) {
+    if (LIKELY(c->max_count))
       return;
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
       c->max_count = 2 * SizeClassMap::MaxCachedHint(i);
       c->class_size = Allocator::ClassIdToSize(i);
     }
+    DCHECK_NE(c->max_count, 0UL);
   }
 
   NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
                        uptr class_id) {
-    InitCache();
-    uptr num_requested_chunks = c->max_count / 2;
+    InitCache(c);
+    const uptr num_requested_chunks = c->max_count / 2;
     if (UNLIKELY(!allocator->GetFromAllocator(&stats_, class_id, c->chunks,
                                               num_requested_chunks)))
       return false;
@@ -117,9 +115,8 @@
 
   NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator, uptr class_id,
                       uptr count) {
-    InitCache();
     CHECK_GE(c->count, count);
-    uptr first_idx_to_drain = c->count - count;
+    const uptr first_idx_to_drain = c->count - count;
     c->count -= count;
     allocator->ReturnToAllocator(&stats_, class_id,
                                  &c->chunks[first_idx_to_drain], count);
@@ -164,12 +161,13 @@
     CHECK_LT(class_id, kNumClasses);
     PerClass *c = &per_class_[class_id];
     if (UNLIKELY(c->count == 0)) {
-      if (UNLIKELY(!Refill(allocator, class_id)))
+      if (UNLIKELY(!Refill(c, allocator, class_id)))
         return nullptr;
+      DCHECK_GT(c->count, 0);
     }
-    stats_.Add(AllocatorStatAllocated, c->class_size);
     void *res = c->batch[--c->count];
     PREFETCH(c->batch[c->count - 1]);
+    stats_.Add(AllocatorStatAllocated, c->class_size);
     return res;
   }
 
@@ -178,20 +176,19 @@
     CHECK_LT(class_id, kNumClasses);
     // If the first allocator call on a new thread is a deallocation, then
     // max_count will be zero, leading to check failure.
-    InitCache();
     PerClass *c = &per_class_[class_id];
-    stats_.Sub(AllocatorStatAllocated, c->class_size);
-    CHECK_NE(c->max_count, 0UL);
+    InitCache(c);
     if (UNLIKELY(c->count == c->max_count))
-      Drain(allocator, class_id);
+      Drain(c, allocator, class_id);
     c->batch[c->count++] = p;
+    stats_.Sub(AllocatorStatAllocated, c->class_size);
   }
 
   void Drain(SizeClassAllocator *allocator) {
     for (uptr i = 0; i < kNumClasses; i++) {
       PerClass *c = &per_class_[i];
       while (c->count > 0)
-        Drain(allocator, i);
+        Drain(c, allocator, i);
     }
   }
 
@@ -216,8 +213,8 @@
   PerClass per_class_[kNumClasses];
   AllocatorStats stats_;
 
-  void InitCache() {
-    if (LIKELY(per_class_[1].max_count))
+  void InitCache(PerClass *c) {
+    if (LIKELY(c->max_count))
       return;
     const uptr batch_class_id = SizeClassMap::ClassID(sizeof(TransferBatch));
     for (uptr i = 0; i < kNumClasses; i++) {
@@ -237,11 +234,12 @@
               batch_class_id : 0;
       }
     }
+    DCHECK_NE(c->max_count, 0UL);
   }
 
-  NOINLINE bool Refill(SizeClassAllocator *allocator, uptr class_id) {
-    InitCache();
-    PerClass *c = &per_class_[class_id];
+  NOINLINE bool Refill(PerClass *c, SizeClassAllocator *allocator,
+                       uptr class_id) {
+    InitCache(c);
     TransferBatch *b = allocator->AllocateBatch(&stats_, this, class_id);
     if (UNLIKELY(!b))
       return false;
@@ -252,11 +250,10 @@
     return true;
   }
 
-  NOINLINE void Drain(SizeClassAllocator *allocator, uptr class_id) {
-    InitCache();
-    PerClass *c = &per_class_[class_id];
-    uptr cnt = Min(c->max_count / 2, c->count);
-    uptr first_idx_to_drain = c->count - cnt;
+  NOINLINE void Drain(PerClass *c, SizeClassAllocator *allocator,
+                      uptr class_id) {
+    const uptr count = Min(c->max_count / 2, c->count);
+    const uptr first_idx_to_drain = c->count - count;
    TransferBatch *b = CreateBatch(
        class_id, allocator, (TransferBatch *)c->batch[first_idx_to_drain]);
     // Failure to allocate a batch while releasing memory is non recoverable.
@@ -264,8 +261,8 @@
     if (UNLIKELY(!b))
       DieOnFailure::OnOOM();
     b->SetFromArray(allocator->GetRegionBeginBySizeClass(class_id),
-                    &c->batch[first_idx_to_drain], cnt);
-    c->count -= cnt;
+                    &c->batch[first_idx_to_drain], count);
+    c->count -= count;
     allocator->DeallocateBatch(&stats_, class_id, b);
   }
 };
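
As a rough standalone sketch of the shape both caches converge on after this change (not part of the patch; ToyCache and everything in it are simplified, hypothetical stand-ins for the real SizeClassAllocator machinery): the caller hands the PerClass entry it has already looked up to InitCache/Refill/Drain, the post-Refill check becomes debug-only, and the stats counter is touched only once the allocation or deallocation is known to succeed.

// Toy model under the assumptions above: lazy per-class init keyed off the
// entry the caller already holds, debug-only post-Refill check, and stats
// accounting deferred until the operation has succeeded.
#include <cassert>
#include <cstdio>
#include <vector>

struct ToyCache {
  static const unsigned kNumClasses = 4;
  static const unsigned kChunkSize = 16;  // single fixed "size class" for brevity

  struct PerClass {
    unsigned count = 0;
    unsigned max_count = 0;  // 0 means "this thread's cache is uninitialized"
    std::vector<void *> chunks;
  };

  void *Allocate(unsigned class_id) {
    PerClass *c = &per_class_[class_id];
    if (c->count == 0) {
      if (!Refill(c, class_id))
        return nullptr;          // stats left untouched on failure
      assert(c->count > 0);      // mirrors DCHECK_GT(c->count, 0)
    }
    void *res = c->chunks[--c->count];
    allocated_bytes_ += kChunkSize;  // updated only once success is certain
    return res;
  }

  void Deallocate(unsigned class_id, void *p) {
    PerClass *c = &per_class_[class_id];
    InitCache(c);                // covers "first call on a thread is a free"
    if (c->count == c->max_count)
      Drain(c, c->max_count / 2);
    c->chunks[c->count++] = p;
    allocated_bytes_ -= kChunkSize;
  }

  ~ToyCache() {
    for (PerClass &c : per_class_)
      Drain(&c, c.count);
  }

  long allocated_bytes_ = 0;     // stands in for AllocatorStats

 private:
  void InitCache(PerClass *c) {
    if (c->max_count)            // fast path: already initialized
      return;
    for (unsigned i = 0; i < kNumClasses; i++) {
      per_class_[i].max_count = 8;
      per_class_[i].chunks.resize(8);
    }
    assert(c->max_count != 0);   // mirrors DCHECK_NE(c->max_count, 0UL)
  }

  bool Refill(PerClass *c, unsigned class_id) {
    (void)class_id;
    InitCache(c);
    // The real cache pulls chunks from the backing allocator here; the toy
    // just mints fresh ones.
    for (unsigned i = 0; i < c->max_count / 2; i++)
      c->chunks[c->count++] = new char[kChunkSize];
    return c->count != 0;
  }

  void Drain(PerClass *c, unsigned n) {
    // The real cache returns chunks to the backing allocator; the toy frees them.
    while (n-- > 0 && c->count > 0)
      delete[] static_cast<char *>(c->chunks[--c->count]);
  }

  PerClass per_class_[kNumClasses];
};

int main() {
  ToyCache cache;
  void *p = cache.Allocate(1);
  cache.Deallocate(1, p);
  std::printf("net bytes accounted: %ld\n", cache.allocated_bytes_);
}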