diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -18,6 +18,7 @@
 #include "quarantine.h"
 #include "report.h"
 #include "secondary.h"
+#include "stack_depot.h"
 #include "string_utils.h"
 #include "tsd.h"
 
@@ -31,6 +32,14 @@
 
 extern "C" inline void EmptyCallback() {}
 
+#if SCUDO_ANDROID && __ANDROID_API__ == 10000
+// This function is not part of the NDK so it does not appear in any public
+// header files. We only declare/use it when targeting the platform (i.e. API
+// level 10000).
+extern "C" size_t android_unsafe_frame_pointer_chase(scudo::uptr *buf,
+                                                     size_t num_entries);
+#endif
+
 namespace scudo {
 
 enum class Option { ReleaseInterval };
@@ -142,6 +151,7 @@
     Options.ZeroContents = getFlags()->zero_contents;
     Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
     Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
+    Options.TrackAllocationStacks = false;
     Options.QuarantineMaxChunkSize =
         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
@@ -221,6 +231,19 @@
     return Ptr;
   }
 
+  NOINLINE u32 collectStackTrace() {
+#if SCUDO_ANDROID && __ANDROID_API__ == 10000
+    enum { kStackSize = 64 };
+    uptr Stack[kStackSize];
+    uptr Size = android_unsafe_frame_pointer_chase(Stack, kStackSize);
+    Size = Min<uptr>(Size, kStackSize);
+    // + 2 to discard collectStackTrace() frame and allocator function frame.
+    return Depot.insert(Stack + Min<uptr>(2, Size), Stack + Size);
+#else
+    return 0;
+#endif
+  }
+
   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
@@ -359,9 +382,15 @@
           PrevEnd = NextPage;
         TaggedPtr = reinterpret_cast<void *>(TaggedUserPtr);
         resizeTaggedChunk(PrevEnd, TaggedUserPtr + Size, BlockEnd);
+        if (ZeroContents && Size) {
+          // Clear any stack metadata that may have previously been stored in
+          // the chunk data.
+          memset(TaggedPtr, 0, 16);
+        }
       } else {
        TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd);
       }
+      storeAllocationStackMaybe(Ptr);
     } else if (UNLIKELY(ZeroContents)) {
       // This condition is not necessarily unlikely, but since memset is
       // costly, we might as well mark it as such.
@@ -515,10 +544,12 @@
                      : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
-        if (UNLIKELY(ClassId && useMemoryTagging()))
+        if (UNLIKELY(ClassId && useMemoryTagging())) {
           resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                             reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                             BlockEnd);
+          storeAllocationStackMaybe(OldPtr);
+        }
         return OldTaggedPtr;
       }
     }
@@ -689,6 +720,130 @@
 
   void disableMemoryTagging() { Primary.disableMemoryTagging(); }
 
+  void setTrackAllocationStacks(bool Track) {
+    Options.TrackAllocationStacks = Track;
+  }
+
+  const char *getStackDepotAddress() const {
+    return reinterpret_cast<const char *>(&Depot);
+  }
+
+  const char *getRegionInfoArrayAddress() const {
+    return Primary.getRegionInfoArrayAddress();
+  }
+
+  static uptr getRegionInfoArraySize() {
+    return PrimaryT::getRegionInfoArraySize();
+  }
+
+  static void getErrorInfo(struct scudo_error_info *ErrorInfo, uintptr_t Ptr,
+                           const char *DepotPtr, const char *RegionInfoPtr,
+                           const char *Memory, const char *MemoryTags,
+                           uintptr_t MemoryAddr, size_t MemorySize) {
+    *ErrorInfo = {};
+    if (!PrimaryT::SupportsMemoryTagging)
+      return;
+
+    uptr UntaggedPtr = untagPointer(Ptr);
+    u8 PtrTag = extractTag(Ptr);
+    BlockInfo Info = PrimaryT::findNearestBlock(RegionInfoPtr, UntaggedPtr);
+
+    auto GetGranule = [&](uptr Addr, const char **Data, uint8_t *Tag) -> bool {
+      if (Addr < MemoryAddr || Addr >= MemoryAddr + MemorySize)
+        return false;
+      *Data = &Memory[Addr - MemoryAddr];
+      *Tag = uint8_t(
+          MemoryTags[(Addr - MemoryAddr) / archMemoryTagGranuleSize()]);
+      return true;
+    };
+
+    auto ReadBlock = [&](uptr Addr, uptr *ChunkAddr,
+                         Chunk::UnpackedHeader *Header, const u32 **Data,
+                         u8 *Tag) {
+      const char *BlockBegin;
+      u8 BlockBeginTag;
+      if (!GetGranule(Addr, &BlockBegin, &BlockBeginTag))
+        return false;
+      uptr ChunkOffset = getChunkOffsetFromBlock(BlockBegin);
+      *ChunkAddr = Addr + ChunkOffset;
+
+      const char *ChunkBegin;
+      if (!GetGranule(*ChunkAddr, &ChunkBegin, Tag))
+        return false;
+      *Header = *reinterpret_cast<const Chunk::UnpackedHeader *>(
+          ChunkBegin - Chunk::getHeaderSize());
+      *Data = reinterpret_cast<const u32 *>(ChunkBegin);
+      return true;
+    };
+
+    auto *Depot = reinterpret_cast<const StackDepot *>(DepotPtr);
+
+    auto MaybeCollectTrace = [&](uintptr_t (&Trace)[64], u32 Hash) {
+      uptr RingPos, Size;
+      if (!Depot->find(Hash, &RingPos, &Size))
+        return;
+      for (unsigned I = 0; I != Size && I != 64; ++I)
+        Trace[I] = (*Depot)[RingPos + I];
+    };
+
+    size_t NextErrorReport = 0;
+
+    // First, check for UAF.
+    {
+      uptr ChunkAddr;
+      Chunk::UnpackedHeader Header;
+      const u32 *Data;
+      uint8_t Tag;
+      if (ReadBlock(Info.BlockBegin, &ChunkAddr, &Header, &Data, &Tag) &&
+          Header.State != Chunk::State::Allocated &&
+          Data[MemTagPrevTagIndex] == PtrTag) {
+        auto *R = &ErrorInfo->reports[NextErrorReport++];
+        R->error_type = USE_AFTER_FREE;
+        R->allocation_address = ChunkAddr;
+        R->allocation_size = Header.SizeOrUnusedBytes;
+        MaybeCollectTrace(R->allocation_trace,
+                          Data[MemTagAllocationTraceIndex]);
+        R->allocation_tid = Data[MemTagAllocationTidIndex];
+        MaybeCollectTrace(R->deallocation_trace,
+                          Data[MemTagDeallocationTraceIndex]);
+        R->deallocation_tid = Data[MemTagDeallocationTidIndex];
+      }
+    }
+
+    auto CheckOOB = [&](uptr BlockAddr) {
+      if (BlockAddr < Info.RegionBegin || BlockAddr >= Info.RegionEnd)
+        return false;
+
+      uptr ChunkAddr;
+      Chunk::UnpackedHeader Header;
+      const u32 *Data;
+      uint8_t Tag;
+      if (!ReadBlock(BlockAddr, &ChunkAddr, &Header, &Data, &Tag) ||
+          Header.State != Chunk::State::Allocated || Tag != PtrTag)
+        return false;
+
+      auto *R = &ErrorInfo->reports[NextErrorReport++];
+      R->error_type =
+          UntaggedPtr < ChunkAddr ? BUFFER_UNDERFLOW : BUFFER_OVERFLOW;
+      R->allocation_address = ChunkAddr;
+      R->allocation_size = Header.SizeOrUnusedBytes;
+      MaybeCollectTrace(R->allocation_trace, Data[MemTagAllocationTraceIndex]);
+      R->allocation_tid = Data[MemTagAllocationTidIndex];
+      return NextErrorReport ==
+             sizeof(ErrorInfo->reports) / sizeof(ErrorInfo->reports[0]);
+    };
+
+    if (CheckOOB(Info.BlockBegin))
+      return;
+
+    // Check for OOB in the 30 surrounding blocks. Beyond that we are likely to
+    // hit false positives.
+    for (int I = 1; I != 16; ++I)
+      if (CheckOOB(Info.BlockBegin + I * Info.BlockSize) ||
+          CheckOOB(Info.BlockBegin - I * Info.BlockSize))
+        return;
+  }
+
 private:
   using SecondaryT = typename Params::Secondary;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
@@ -708,6 +863,22 @@
 
   static const u32 BlockMarker = 0x44554353U;
 
+  // These are indexes into an "array" of 32-bit values that store information
+  // inline with a chunk that is relevant to diagnosing memory tag faults, where
+  // 0 corresponds to the address of the user memory. This means that negative
+  // indexes may be used to store information about allocations, while positive
+  // indexes may only be used to store information about deallocations, because
+  // the user memory is in use until it has been deallocated. The smallest index
+  // that may be used is -2, which corresponds to 8 bytes before the user
+  // memory, because the chunk header size is 8 bytes and in allocators that
+  // support memory tagging the minimum alignment is at least the tag granule
+  // size (16 on aarch64).
+  static const sptr MemTagAllocationTraceIndex = -2;
+  static const sptr MemTagAllocationTidIndex = -1;
+  static const sptr MemTagDeallocationTraceIndex = 0;
+  static const sptr MemTagDeallocationTidIndex = 1;
+  static const sptr MemTagPrevTagIndex = 2;
+
   GlobalStats Stats;
   TSDRegistryT TSDRegistry;
   PrimaryT Primary;
@@ -721,6 +892,7 @@
     u8 ZeroContents : 1;          // zero_contents
     u8 DeallocTypeMismatch : 1;   // dealloc_type_mismatch
     u8 DeleteSizeMismatch : 1;    // delete_size_mismatch
+    u8 TrackAllocationStacks : 1;
     u32 QuarantineMaxChunkSize;   // quarantine_max_chunk_size
   } Options;
 
@@ -728,6 +900,8 @@
   gwp_asan::GuardedPoolAllocator GuardedAlloc;
 #endif // GWP_ASAN_HOOKS
 
+  StackDepot Depot;
+
   // The following might get optimized out by the compiler.
   NOINLINE void performSanityChecks() {
     // Verify that the header offset field can hold the maximum offset. In the
@@ -787,8 +961,10 @@
                                    uptr Size) {
     Chunk::UnpackedHeader NewHeader = *Header;
     if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
+      u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
       uptr TaggedBegin, TaggedEnd;
       setRandomTag(Ptr, Size, &TaggedBegin, &TaggedEnd);
+      storeDeallocationStackMaybe(Ptr, PrevTag);
     }
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
     // than the maximum allowed, we return a chunk directly to the backend.
@@ -824,13 +1000,39 @@
 
   bool getChunkFromBlock(uptr Block, uptr *Chunk,
                          Chunk::UnpackedHeader *Header) {
-    u32 Offset = 0;
-    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
-      Offset = reinterpret_cast<const u32 *>(Block)[1];
-    *Chunk = Block + Offset + Chunk::getHeaderSize();
+    *Chunk =
+        Block + getChunkOffsetFromBlock(reinterpret_cast<const char *>(Block));
     return Chunk::isValid(Cookie, reinterpret_cast<void *>(*Chunk), Header);
   }
 
+  static uptr getChunkOffsetFromBlock(const char *Block) {
+    u32 Offset = 0;
+    if (reinterpret_cast<const u32 *>(Block)[0] == BlockMarker)
+      Offset = reinterpret_cast<const u32 *>(Block)[1];
+    return Offset + Chunk::getHeaderSize();
+  }
+
+  void storeAllocationStackMaybe(void *Ptr) {
+    if (!UNLIKELY(Options.TrackAllocationStacks))
+      return;
+    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+    Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
+    Ptr32[MemTagAllocationTidIndex] = getThreadID();
+  }
+
+  void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) {
+    if (!UNLIKELY(Options.TrackAllocationStacks))
+      return;
+
+    // Disable tag checks here so that we don't need to worry about zero sized
+    // allocations.
+    ScopedDisableMemoryTagChecks x;
+    auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
+    Ptr32[MemTagDeallocationTraceIndex] = collectStackTrace();
+    Ptr32[MemTagDeallocationTidIndex] = getThreadID();
+    Ptr32[MemTagPrevTagIndex] = PrevTag;
+  }
+
   uptr getStats(ScopedString *Str) {
     Primary.getStats(Str);
     Secondary.getStats(Str);
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -133,6 +133,8 @@
 
 u64 getMonotonicTime();
 
+u32 getThreadID();
+
 // Our randomness gathering function is limited to 256 bytes to ensure we get
 // as many bytes as requested, and avoid interruptions (on Linux).
 constexpr uptr MaxRandomLength = 256U;
@@ -173,6 +175,13 @@
 
 void setAbortMessage(const char *Message);
 
+struct BlockInfo {
+  uptr BlockBegin;
+  uptr BlockSize;
+  uptr RegionBegin;
+  uptr RegionEnd;
+};
+
 } // namespace scudo
 
 #endif // SCUDO_COMMON_H_
diff --git a/compiler-rt/lib/scudo/standalone/fuchsia.cpp b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
--- a/compiler-rt/lib/scudo/standalone/fuchsia.cpp
+++ b/compiler-rt/lib/scudo/standalone/fuchsia.cpp
@@ -170,6 +170,8 @@
 
 u32 getNumberOfCPUs() { return _zx_system_get_num_cpus(); }
 
+u32 getThreadID() { return 0; }
+
 bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
   static_assert(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN, "");
   if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
diff --git a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
--- a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -9,6 +9,8 @@
 #ifndef SCUDO_INTERFACE_H_
 #define SCUDO_INTERFACE_H_
 
+#include <stddef.h>
+
 extern "C" {
 
 __attribute__((weak)) const char *__scudo_default_options();
@@ -22,6 +24,79 @@
 
 typedef void (*iterate_callback)(uintptr_t base, size_t size, void *arg);
 
+// Determine the likely cause of a tag check fault or other memory protection
+// error on a system with memory tagging support. The results are returned via
+// the error_info data structure. Up to three possible causes are returned in
+// the reports array, in decreasing order of probability. The remaining elements
+// of reports are zero-initialized.
+//
+// This function may be called from a different process from the one that
+// crashed. In this case, various data structures must be copied from the
+// crashing process to the process that analyzes the crash.
+//
+// ptr is the fault address. On aarch64 this is available in the system register
+// FAR_ELx, or far_context.far in an upcoming release of the Linux kernel. This
+// address must include the pointer tag; note that the kernel strips the tag
+// from the fields siginfo.si_addr and sigcontext.fault_address, so these
+// addresses are not suitable to be passed as ptr.
+//
+// stack_depot is a pointer to the stack depot data structure, which may be
+// obtained by calling the function __scudo_get_stack_depot_addr() in the
+// crashing process. The size of the stack depot is available by calling the
+// function __scudo_get_stack_depot_size().
+//
+// region_info is a pointer to the region info data structure, which may be
+// obtained by calling the function __scudo_get_region_info_addr() in the
+// crashing process. The size of the region info is available by calling the
+// function __scudo_get_region_info_size().
+//
+// memory is a pointer to a region of memory surrounding the fault address.
+// The more memory available via this pointer, the more likely it is that the
+// function will be able to analyze a crash correctly.
+//
+// memory_tags is a pointer to an array of memory tags for the memory accessed
+// via memory. Each byte of this array corresponds to a region of memory of size
+// equal to the architecturally defined memory tag granule size (16 on aarch64).
+//
+// memory_addr is the address of memory in the crashing process's address space.
+//
+// memory_size is the size of the memory region referred to by the memory
+// pointer.
+void __scudo_get_error_info(struct scudo_error_info *error_info, uintptr_t ptr,
+                            const char *stack_depot, const char *region_info,
+                            const char *memory, const char *memory_tags,
+                            uintptr_t memory_addr, size_t memory_size);
+
+enum scudo_error_type {
+  UNKNOWN,
+  USE_AFTER_FREE,
+  BUFFER_OVERFLOW,
+  BUFFER_UNDERFLOW,
+};
+
+struct scudo_error_report {
+  enum scudo_error_type error_type;
+
+  uintptr_t allocation_address;
+  uintptr_t allocation_size;
+
+  uint32_t allocation_tid;
+  uintptr_t allocation_trace[64];
+
+  uint32_t deallocation_tid;
+  uintptr_t deallocation_trace[64];
+};
+
+struct scudo_error_info {
+  struct scudo_error_report reports[3];
+};
+
+const char *__scudo_get_stack_depot_addr();
+size_t __scudo_get_stack_depot_size();
+
+const char *__scudo_get_region_info_addr();
+size_t __scudo_get_region_info_size();
+
 } // extern "C"
 
 #endif // SCUDO_INTERFACE_H_
diff --git a/compiler-rt/lib/scudo/standalone/linux.cpp b/compiler-rt/lib/scudo/standalone/linux.cpp
--- a/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -139,6 +139,14 @@
   return static_cast<u32>(CPU_COUNT(&CPUs));
 }
 
+u32 getThreadID() {
+#if SCUDO_ANDROID
+  return static_cast<u32>(gettid());
+#else
+  return static_cast<u32>(syscall(SYS_gettid));
+#endif
+}
+
 // Blocking is possibly unused if the getrandom block is not compiled in.
 bool getRandom(void *Buffer, uptr Length, UNUSED bool Blocking) {
   if (!Buffer || !Length || Length > MaxRandomLength)
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -51,6 +51,20 @@
   __asm__ __volatile__(".arch_extension mte; msr tco, #0");
 }
 
+class ScopedDisableMemoryTagChecks {
+  size_t PrevTCO;
+
+public:
+  ScopedDisableMemoryTagChecks() {
+    __asm__ __volatile__(".arch_extension mte; mrs %0, tco; msr tco, #1"
+                         : "=r"(PrevTCO));
+  }
+
+  ~ScopedDisableMemoryTagChecks() {
+    __asm__ __volatile__(".arch_extension mte; msr tco, %0" : : "r"(PrevTCO));
+  }
+};
+
 inline uptr untagPointer(uptr Ptr) { return Ptr & ((1ULL << 56) - 1); }
 
 inline void setRandomTag(void *Ptr, uptr Size, uptr *TaggedBegin,
@@ -167,6 +181,10 @@
   return TaggedPtr;
 }
 
+inline uint8_t extractTag(uptr Ptr) {
+  return Ptr >> 56;
+}
+
 #else
 
 inline constexpr bool archSupportsMemoryTagging() { return false; }
@@ -191,6 +209,10 @@
   UNREACHABLE("memory tagging not supported");
 }
 
+struct ScopedDisableMemoryTagChecks {
+  ScopedDisableMemoryTagChecks() {}
+};
+
 inline uptr untagPointer(uptr Ptr) {
   (void)Ptr;
   UNREACHABLE("memory tagging not supported");
@@ -224,6 +246,11 @@
   UNREACHABLE("memory tagging not supported");
 }
 
+inline uint8_t extractTag(uptr Ptr) {
+  (void)Ptr;
+  UNREACHABLE("memory tagging not supported");
+}
+
 #endif
 
 } // namespace scudo
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -206,6 +206,15 @@
   bool useMemoryTagging() { return false; }
   void disableMemoryTagging() {}
 
+  const char *getRegionInfoArrayAddress() const { return nullptr; }
+  static uptr getRegionInfoArraySize() { return 0; }
+
+  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
+    (void)RegionInfoData;
+    (void)Ptr;
+    return {};
+  }
+
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr RegionSize = 1UL << RegionSizeLog;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -209,6 +209,55 @@
   }
   void disableMemoryTagging() { UseMemoryTagging = false; }
 
+  const char *getRegionInfoArrayAddress() const {
+    return reinterpret_cast<const char *>(RegionInfoArray);
+  }
+
+  static uptr getRegionInfoArraySize() {
+    return sizeof(RegionInfoArray);
+  }
+
+  static BlockInfo findNearestBlock(const char *RegionInfoData, uptr Ptr) {
+    const RegionInfo *RegionInfoArray =
+        reinterpret_cast<const RegionInfo *>(RegionInfoData);
+    uptr ClassId;
+    uptr MinDistance = -1UL;
+    for (uptr I = 0; I != NumClasses; ++I) {
+      if (I == SizeClassMap::BatchClassId)
+        continue;
+      uptr Begin = RegionInfoArray[I].RegionBeg;
+      uptr End = Begin + RegionInfoArray[I].AllocatedUser;
+      uptr RegionDistance;
+      if (Begin <= Ptr) {
+        if (Ptr < End)
+          RegionDistance = 0;
+        else
+          RegionDistance = Ptr - End;
+      } else {
+        RegionDistance = Begin - Ptr;
+      }
+
+      if (RegionDistance < MinDistance) {
+        MinDistance = RegionDistance;
+        ClassId = I;
+      }
+    }
+
+    BlockInfo B = {};
+    if (MinDistance <= 8192) {
+      B.RegionBegin = RegionInfoArray[ClassId].RegionBeg;
+      B.RegionEnd = B.RegionBegin + RegionInfoArray[ClassId].AllocatedUser;
+      B.BlockSize = SizeClassMap::getSizeByClassId(ClassId);
+      B.BlockBegin =
+          B.RegionBegin + (Ptr - B.RegionBegin) / B.BlockSize * B.BlockSize;
+      while (B.BlockBegin < B.RegionBegin)
+        B.BlockBegin += B.BlockSize;
+      while (B.RegionEnd < B.BlockBegin + B.BlockSize)
+        B.BlockBegin -= B.BlockSize;
+    }
+    return B;
+  }
+
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
diff --git a/compiler-rt/lib/scudo/standalone/stack_depot.h b/compiler-rt/lib/scudo/standalone/stack_depot.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/stack_depot.h
@@ -0,0 +1,103 @@
+//===-- stack_depot.h -------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_STACK_DEPOT_H_
+#define SCUDO_STACK_DEPOT_H_
+
+#include "mutex.h"
+
+namespace scudo {
+
+class MurMur2HashBuilder {
+  static const u32 m = 0x5bd1e995;
+  static const u32 seed = 0x9747b28c;
+  static const u32 r = 24;
+  u32 h;
+
+public:
+  explicit MurMur2HashBuilder(u32 init = 0) { h = seed ^ init; }
+  void add(u32 k) {
+    k *= m;
+    k ^= k >> r;
+    k *= m;
+    h *= m;
+    h ^= k;
+  }
+  u32 get() {
+    u32 x = h;
+    x ^= x >> 13;
+    x *= m;
+    x ^= x >> 15;
+    return x;
+  }
+};
+
+class StackDepot {
+  HybridMutex ring_end_mu;
+  u32 ring_end;
+
+  static const uptr kTabBits = 16;
+  static const uptr kTabSize = 1 << kTabBits;
+  static const uptr kTabMask = kTabSize - 1;
+  u32 tab[kTabSize];
+
+  static const uptr kRingBits = 19;
+  static const uptr kRingSize = 1 << kRingBits;
+  static const uptr kRingMask = kRingSize - 1;
+  u64 ring[kRingSize];
+
+public:
+  u32 insert(uptr *begin, uptr *end) {
+    MurMur2HashBuilder b;
+    for (uptr *i = begin; i != end; ++i)
+      b.add(u32(*i));
+    u32 hash = b.get();
+
+    u32 pos = hash & kTabMask;
+    u32 ring_pos = tab[pos];
+    u64 entry = ring[ring_pos];
+    u64 id = (u64(end - begin) << 33) | (u64(hash) << 1) | 1;
+    if (entry == id)
+      return hash;
+
+    ScopedLock lock(ring_end_mu);
+    ring_pos = ring_end;
+    tab[pos] = ring_pos;
+    ring[ring_pos] = id;
+    for (uptr *i = begin; i != end; ++i) {
+      ring_pos = (ring_pos + 1) & kRingMask;
+      ring[ring_pos] = *i;
+    }
+    ring_end = (ring_pos + 1) & kRingMask;
+    return hash;
+  }
+
+  bool find(u32 hash, uptr *ring_pos_ptr, uptr *size_ptr) const {
+    u32 pos = hash & kTabMask;
+    u32 ring_pos = tab[pos];
+    u64 entry = ring[ring_pos];
+    u64 hash_with_tag_bit = (u64(hash) << 1) | 1;
+    if ((entry & 0x1ffffffff) != hash_with_tag_bit)
+      return false;
+    u32 size = entry >> 33;
+    *ring_pos_ptr = (ring_pos + 1) & kRingMask;
+    *size_ptr = size;
+    MurMur2HashBuilder b;
+    for (uptr i = 0; i != size; ++i) {
+      ring_pos = (ring_pos + 1) & kRingMask;
+      b.add(u32(ring[ring_pos]));
+    }
+    return b.get() == hash;
+  }
+
+  u64 operator[](uptr ring_pos) const { return ring[ring_pos & kRingMask]; }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_STACK_DEPOT_H_
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -219,4 +219,9 @@
   SCUDO_ALLOCATOR.disableMemoryTagging();
 }
 
+INTERFACE WEAK void
+SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
+  SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
+}
+
 } // extern "C"
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
--- a/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c_bionic.cpp
@@ -48,4 +48,29 @@
 // TODO(kostyak): support both allocators.
 INTERFACE void __scudo_print_stats(void) { Allocator.printStats(); }
 
+INTERFACE void
+__scudo_get_error_info(struct scudo_error_info *error_info, uintptr_t ptr,
+                       const char *stack_depot, const char *region_info,
+                       const char *memory, const char *memory_tags,
+                       uintptr_t memory_addr, size_t memory_size) {
+  Allocator.getErrorInfo(error_info, ptr, stack_depot, region_info, memory,
+                         memory_tags, memory_addr, memory_size);
+}
+
+INTERFACE const char *__scudo_get_stack_depot_addr() {
+  return Allocator.getStackDepotAddress();
+}
+
+INTERFACE size_t __scudo_get_stack_depot_size() {
+  return sizeof(scudo::StackDepot);
+}
+
+INTERFACE const char *__scudo_get_region_info_addr() {
+  return Allocator.getRegionInfoArrayAddress();
+}
+
+INTERFACE size_t __scudo_get_region_info_size() {
+  return Allocator.getRegionInfoArraySize();
+}
+
 #endif // SCUDO_ANDROID && _BIONIC
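
Usage sketch (not part of the patch): __scudo_get_error_info() is meant to be
driven from a crash handler, possibly running in a different process than the
one that faulted. The sketch below assumes the copied_* buffers and the tagged
fault address have already been extracted from the crashing process by
whatever mechanism the debugger uses; only the interface.h declarations above
are real, the helper itself and its parameter names are hypothetical.

  #include <stdio.h>

  #include "scudo/interface.h"

  // All buffers are assumed to have been copied out of the crashing process
  // already; the patch does not prescribe how that copy is performed.
  void reportScudoCrash(uintptr_t fault_addr,           // tagged fault address
                        const char *copied_stack_depot, // stack depot bytes
                        const char *copied_region_info, // region info bytes
                        const char *copied_memory,      // memory around fault
                        const char *copied_memory_tags, // 1 byte per granule
                        uintptr_t memory_addr, size_t memory_size) {
    struct scudo_error_info info;
    __scudo_get_error_info(&info, fault_addr, copied_stack_depot,
                           copied_region_info, copied_memory,
                           copied_memory_tags, memory_addr, memory_size);

    // Reports are ordered by decreasing probability; unused entries stay
    // zero-initialized, so the first UNKNOWN entry ends the list.
    for (size_t i = 0; i < sizeof(info.reports) / sizeof(info.reports[0]); ++i) {
      const struct scudo_error_report *r = &info.reports[i];
      if (r->error_type == UNKNOWN)
        break;
      printf("cause %zu: type=%d allocation=[0x%zx, +%zu) alloc_tid=%u\n", i,
             (int)r->error_type, (size_t)r->allocation_address,
             (size_t)r->allocation_size, r->allocation_tid);
      // allocation_trace[] / deallocation_trace[] hold up to 64 frame
      // addresses that can be fed to a symbolizer; unused slots are zero.
    }
  }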
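
In-process enablement, also a sketch rather than part of the patch: stack
trace collection is off by default and, per the combined.h changes above, only
takes effect on the memory tagging code paths. With the plain C wrappers
SCUDO_PREFIX() expands to nothing, so the entry point added in wrappers_c.inc
is exported as malloc_set_track_allocation_stacks(); the bionic wrappers add
their scudo_ prefix instead. Once enabled, allocate()/deallocate() store a
trace hash and thread ID in the 32-bit slots addressed by the MemTag*Index
constants next to the user pointer, to be resolved later against the depot.

  // Declaration matching the wrapper added in wrappers_c.inc, assuming the
  // unprefixed build; adjust the symbol name for prefixed builds.
  extern "C" void malloc_set_track_allocation_stacks(int track);

  void enableScudoCrashDiagnostics() {
    // From this point on, allocations and deallocations record the metadata
    // that __scudo_get_error_info() later uses to attribute a tag fault.
    malloc_set_track_allocation_stacks(1);
  }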