diff --git a/compiler-rt/lib/asan/asan_allocator.cpp b/compiler-rt/lib/asan/asan_allocator.cpp
--- a/compiler-rt/lib/asan/asan_allocator.cpp
+++ b/compiler-rt/lib/asan/asan_allocator.cpp
@@ -71,8 +71,7 @@
 static const uptr kAllocBegMagic = 0xCC6E96B9;
 
 struct ChunkHeader {
-  u8 chunk_state;  // Must be first.
-  u8 padding[3];
+  atomic_uint32_t chunk_state;
   u32 alloc_tid : 24;
   u32 from_memalign : 1;
   u32 alloc_type : 2;
@@ -86,7 +85,7 @@
   // align < 8 -> 0
   // else -> log2(min(align, 512)) - 2
   u32 user_requested_alignment_log : 3;
-  u32 alloc_context_id;
+  atomic_uint32_t alloc_context_id;
 };
 
 struct ChunkBase : ChunkHeader {
@@ -140,8 +139,12 @@
   }
 
   void Recycle(AsanChunk *m) {
-    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
-    atomic_store((atomic_uint8_t *)m, CHUNK_INVALID, memory_order_relaxed);
+    u32 old_chunk_state = CHUNK_QUARANTINE;
+    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
+                                        CHUNK_INVALID, memory_order_acquire)) {
+      CHECK_EQ(old_chunk_state, CHUNK_QUARANTINE);
+    }
+
     CHECK_NE(m->alloc_tid, kInvalidTid);
     CHECK_NE(m->free_tid, kInvalidTid);
     PoisonShadow(m->Beg(),
@@ -301,22 +304,25 @@
     // housekeeping chunk, like TransferBatch. Start by assuming the former.
     AsanChunk *ac = GetAsanChunk((void *)chunk);
     uptr allocated_size = allocator.GetActuallyAllocatedSize((void *)ac);
-    uptr beg = ac->Beg();
-    uptr end = ac->Beg() + ac->UsedSize(true);
-    uptr chunk_end = chunk + allocated_size;
-    if (chunk < beg && beg < end && end <= chunk_end &&
-        ac->chunk_state == CHUNK_ALLOCATED) {
-      // Looks like a valid AsanChunk in use, poison redzones only.
-      PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
-      uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
-      FastPoisonShadowPartialRightRedzone(
-          end_aligned_down, end - end_aligned_down,
-          chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
-    } else {
-      // This is either not an AsanChunk or freed or quarantined AsanChunk.
-      // In either case, poison everything.
-      PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
+    if (atomic_load(&ac->chunk_state, memory_order_acquire) ==
+        CHUNK_ALLOCATED) {
+      uptr beg = ac->Beg();
+      uptr end = ac->Beg() + ac->UsedSize(true);
+      uptr chunk_end = chunk + allocated_size;
+      if (chunk < beg && beg < end && end <= chunk_end) {
+        // Looks like a valid AsanChunk in use, poison redzones only.
+        PoisonShadow(chunk, beg - chunk, kAsanHeapLeftRedzoneMagic);
+        uptr end_aligned_down = RoundDownTo(end, SHADOW_GRANULARITY);
+        FastPoisonShadowPartialRightRedzone(
+            end_aligned_down, end - end_aligned_down,
+            chunk_end - end_aligned_down, kAsanHeapLeftRedzoneMagic);
+        return;
+      }
     }
+
+    // This is either not an AsanChunk or freed or quarantined AsanChunk.
+    // In either case, poison everything.
+    PoisonShadow(chunk, allocated_size, kAsanHeapLeftRedzoneMagic);
   }
 
   void ReInitialize(const AllocatorOptions &options) {
@@ -381,14 +387,18 @@
                          AsanChunk *right_chunk) {
     // Prefer an allocated chunk over freed chunk and freed chunk
    // over available chunk.
-    if (left_chunk->chunk_state != right_chunk->chunk_state) {
-      if (left_chunk->chunk_state == CHUNK_ALLOCATED)
+    u32 left_state =
+        atomic_load(&left_chunk->chunk_state, memory_order_relaxed);
+    u32 right_state =
+        atomic_load(&right_chunk->chunk_state, memory_order_relaxed);
+    if (left_state != right_state) {
+      if (left_state == CHUNK_ALLOCATED)
         return left_chunk;
-      if (right_chunk->chunk_state == CHUNK_ALLOCATED)
+      if (right_state == CHUNK_ALLOCATED)
         return right_chunk;
-      if (left_chunk->chunk_state == CHUNK_QUARANTINE)
+      if (left_state == CHUNK_QUARANTINE)
         return left_chunk;
-      if (right_chunk->chunk_state == CHUNK_QUARANTINE)
+      if (right_state == CHUNK_QUARANTINE)
         return right_chunk;
     }
     // Same chunk_state: choose based on offset.
@@ -403,9 +413,10 @@
   bool UpdateAllocationStack(uptr addr, BufferedStackTrace *stack) {
     AsanChunk *m = GetAsanChunkByAddr(addr);
     if (!m) return false;
-    if (m->chunk_state != CHUNK_ALLOCATED) return false;
+    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+      return false;
     if (m->Beg() != addr) return false;
-    atomic_store((atomic_uint32_t *)&m->alloc_context_id, StackDepotPut(*stack),
+    atomic_store(&m->alloc_context_id, StackDepotPut(*stack),
                  memory_order_relaxed);
     return true;
   }
@@ -523,7 +534,8 @@
     }
     m->user_requested_alignment_log = user_requested_alignment_log;
 
-    m->alloc_context_id = StackDepotPut(*stack);
+    atomic_store(&m->alloc_context_id, StackDepotPut(*stack),
+                 memory_order_relaxed);
 
     uptr size_rounded_down_to_granularity =
         RoundDownTo(size, SHADOW_GRANULARITY);
@@ -556,7 +568,7 @@
                                                  : __lsan::kDirectlyLeaked;
 #endif
     // Must be the last mutation of metadata in this function.
-    atomic_store((atomic_uint8_t *)m, CHUNK_ALLOCATED, memory_order_release);
+    atomic_store(&m->chunk_state, CHUNK_ALLOCATED, memory_order_release);
     ASAN_MALLOC_HOOK(res, size);
     return res;
   }
@@ -564,10 +576,10 @@
   // Set quarantine flag if chunk is allocated, issue ASan error report on
   // available and quarantined chunks. Return true on success, false otherwise.
   bool AtomicallySetQuarantineFlagIfAllocated(AsanChunk *m, void *ptr,
-                                              BufferedStackTrace *stack) {
-    u8 old_chunk_state = CHUNK_ALLOCATED;
+                                              BufferedStackTrace *stack) {
+    u32 old_chunk_state = CHUNK_ALLOCATED;
     // Flip the chunk_state atomically to avoid race on double-free.
-    if (!atomic_compare_exchange_strong((atomic_uint8_t *)m, &old_chunk_state,
+    if (!atomic_compare_exchange_strong(&m->chunk_state, &old_chunk_state,
                                         CHUNK_QUARANTINE,
                                         memory_order_acquire)) {
       ReportInvalidFree(ptr, old_chunk_state, stack);
@@ -584,7 +596,8 @@
   // Expects the chunk to already be marked as quarantined by using
   // AtomicallySetQuarantineFlagIfAllocated.
   void QuarantineChunk(AsanChunk *m, void *ptr, BufferedStackTrace *stack) {
-    CHECK_EQ(m->chunk_state, CHUNK_QUARANTINE);
+    CHECK_EQ(atomic_load(&m->chunk_state, memory_order_relaxed),
+             CHUNK_QUARANTINE);
     AsanThread *t = GetCurrentThread();
     m->free_tid = t ? t->tid() : 0;
     m->free_context_id = StackDepotPut(*stack);
@@ -676,7 +689,7 @@
 
     void *new_ptr = Allocate(new_size, 8, stack, FROM_MALLOC, true);
     if (new_ptr) {
-      u8 chunk_state = m->chunk_state;
+      u32 chunk_state = atomic_load(&m->chunk_state, memory_order_acquire);
       if (chunk_state != CHUNK_ALLOCATED)
         ReportInvalidFree(old_ptr, chunk_state, stack);
       CHECK_NE(REAL(memcpy), nullptr);
@@ -703,7 +716,8 @@
     return ptr;
   }
 
-  void ReportInvalidFree(void *ptr, u8 chunk_state, BufferedStackTrace *stack) {
+  void ReportInvalidFree(void *ptr, u32 chunk_state,
+                         BufferedStackTrace *stack) {
     if (chunk_state == CHUNK_QUARANTINE)
       ReportDoubleFree((uptr)ptr, stack);
     else
@@ -779,7 +793,8 @@
   uptr AllocationSize(uptr p) {
     AsanChunk *m = GetAsanChunkByAddr(p);
     if (!m) return 0;
-    if (m->chunk_state != CHUNK_ALLOCATED) return 0;
+    if (atomic_load(&m->chunk_state, memory_order_acquire) != CHUNK_ALLOCATED)
+      return 0;
     if (m->Beg() != p) return 0;
     return m->UsedSize();
   }
@@ -845,13 +860,16 @@
 }
 
 bool AsanChunkView::IsValid() const {
-  return chunk_ && chunk_->chunk_state != CHUNK_INVALID;
+  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) !=
+                       CHUNK_INVALID;
 }
 bool AsanChunkView::IsAllocated() const {
-  return chunk_ && chunk_->chunk_state == CHUNK_ALLOCATED;
+  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+                       CHUNK_ALLOCATED;
 }
 bool AsanChunkView::IsQuarantined() const {
-  return chunk_ && chunk_->chunk_state == CHUNK_QUARANTINE;
+  return chunk_ && atomic_load(&chunk_->chunk_state, memory_order_relaxed) ==
+                       CHUNK_QUARANTINE;
 }
 uptr AsanChunkView::Beg() const { return chunk_->Beg(); }
 uptr AsanChunkView::End() const { return Beg() + UsedSize(); }
@@ -874,7 +892,9 @@
   return res;
 }
 
-u32 AsanChunkView::GetAllocStackId() const { return chunk_->alloc_context_id; }
+u32 AsanChunkView::GetAllocStackId() const {
+  return atomic_load(&chunk_->alloc_context_id, memory_order_relaxed);
+}
 u32 AsanChunkView::GetFreeStackId() const {
   return IsQuarantined() ? chunk_->free_context_id : 0;
 }
@@ -1061,10 +1081,10 @@
 uptr PointsIntoChunk(void* p) {
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddrFastLocked(addr);
-  if (!m) return 0;
-  uptr chunk = m->Beg();
-  if (m->chunk_state != __asan::CHUNK_ALLOCATED)
+  if (!m || atomic_load(&m->chunk_state, memory_order_acquire) !=
+                __asan::CHUNK_ALLOCATED)
     return 0;
+  uptr chunk = m->Beg();
   if (m->AddrIsInside(addr, /*locked_version=*/true))
     return chunk;
   if (IsSpecialCaseOfOperatorNew0(chunk, m->UsedSize(/*locked_version*/ true),
@@ -1106,7 +1126,8 @@
 
 bool LsanMetadata::allocated() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->chunk_state == __asan::CHUNK_ALLOCATED;
+  return atomic_load(&m->chunk_state, memory_order_relaxed) ==
+         __asan::CHUNK_ALLOCATED;
 }
 
 ChunkTag LsanMetadata::tag() const {
@@ -1126,7 +1147,7 @@
 
 u32 LsanMetadata::stack_trace_id() const {
   __asan::AsanChunk *m = reinterpret_cast<__asan::AsanChunk *>(metadata_);
-  return m->alloc_context_id;
+  return atomic_load(&m->alloc_context_id, memory_order_relaxed);
 }
 
 void ForEachChunk(ForEachChunkCallback callback, void *arg) {
@@ -1137,7 +1158,9 @@
   uptr addr = reinterpret_cast<uptr>(p);
   __asan::AsanChunk *m = __asan::instance.GetAsanChunkByAddr(addr);
   if (!m) return kIgnoreObjectInvalid;
-  if ((m->chunk_state == __asan::CHUNK_ALLOCATED) && m->AddrIsInside(addr)) {
+  if ((atomic_load(&m->chunk_state, memory_order_acquire) ==
+       __asan::CHUNK_ALLOCATED) &&
+      m->AddrIsInside(addr)) {
     if (m->lsan_tag == kIgnored)
       return kIgnoreObjectAlreadyIgnored;
     m->lsan_tag = __lsan::kIgnored;
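
Reviewer note (not part of the patch): the change relies on Allocate() keeping the release-store of CHUNK_ALLOCATED as "the last mutation of metadata", paired with the acquire-loads added at the readers. Below is a minimal, self-contained sketch of that publish/observe pattern, using std::atomic as a stand-in for sanitizer_common's atomic_uint32_t and atomic_store/atomic_load; the Publish/Observe helpers, field values, and numeric enum values are illustrative only.

```cpp
#include <atomic>
#include <cassert>
#include <cstdint>
#include <thread>

enum : uint32_t { CHUNK_INVALID = 0, CHUNK_ALLOCATED = 2 };  // values illustrative

struct ChunkHeader {
  std::atomic<uint32_t> chunk_state{CHUNK_INVALID};
  std::atomic<uint32_t> alloc_context_id{0};
  uint32_t user_requested_size = 0;  // plain field, published via chunk_state
};

void Publish(ChunkHeader &m) {
  m.user_requested_size = 64;  // ordinary write
  m.alloc_context_id.store(42, std::memory_order_relaxed);
  // Last mutation of the header: the release store makes the writes above
  // visible to any thread that acquire-loads CHUNK_ALLOCATED.
  m.chunk_state.store(CHUNK_ALLOCATED, std::memory_order_release);
}

void Observe(ChunkHeader &m) {
  if (m.chunk_state.load(std::memory_order_acquire) == CHUNK_ALLOCATED) {
    // The acquire load synchronizes with the release store, so these reads
    // see fully initialized metadata.
    assert(m.user_requested_size == 64);
    assert(m.alloc_context_id.load(std::memory_order_relaxed) == 42);
  }
}

int main() {
  ChunkHeader m;
  std::thread writer(Publish, std::ref(m));
  std::thread reader(Observe, std::ref(m));
  writer.join();
  reader.join();
}
```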
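A second sketch, with the same std::atomic stand-ins and illustrative names and values: the compare-exchange in AtomicallySetQuarantineFlagIfAllocated() (and the analogous one added to Recycle()) lets exactly one of two racing frees flip the state, while the loser receives the state installed by the winner and can report it; this is what "Flip the chunk_state atomically to avoid race on double-free" refers to.

```cpp
#include <atomic>
#include <cstdint>
#include <cstdio>

enum : uint32_t { CHUNK_ALLOCATED = 2, CHUNK_QUARANTINE = 3 };  // values illustrative

struct ChunkHeader {
  std::atomic<uint32_t> chunk_state{CHUNK_ALLOCATED};
};

// Returns true if this caller won the race and owns the transition to
// CHUNK_QUARANTINE; on failure, `expected` holds whatever state another
// thread installed first (e.g. CHUNK_QUARANTINE for a double-free).
bool TryQuarantine(ChunkHeader &m) {
  uint32_t expected = CHUNK_ALLOCATED;
  if (!m.chunk_state.compare_exchange_strong(expected, CHUNK_QUARANTINE,
                                             std::memory_order_acquire)) {
    std::printf("invalid free: chunk state was %u\n", (unsigned)expected);
    return false;
  }
  return true;
}

int main() {
  ChunkHeader m;
  std::printf("first free:  %d\n", TryQuarantine(m));  // 1: ALLOCATED -> QUARANTINE
  std::printf("second free: %d\n", TryQuarantine(m));  // 0: reports the quarantined state
}
```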