Index: lib/scudo/CMakeLists.txt
===================================================================
--- lib/scudo/CMakeLists.txt
+++ lib/scudo/CMakeLists.txt
@@ -14,6 +14,8 @@
   scudo_interceptors.cpp
   scudo_new_delete.cpp
   scudo_termination.cpp
+  scudo_thread_android.cpp
+  scudo_thread_linux.cpp
   scudo_utils.cpp)
 
 # Enable the SSE 4.2 instruction set for scudo_crc32.cpp, if available.
Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -82,8 +82,55 @@
   void copyTo(Flags *f, CommonFlags *cf) const;
 };
 
+#if SANITIZER_CAN_USE_ALLOCATOR64
+const uptr AllocatorSpace = ~0ULL;
+# if defined(__aarch64__) && SANITIZER_ANDROID
+const uptr AllocatorSize = 0x4000000000ULL;  // 256G.
+# elif defined(__aarch64__)
+// AArch64/SANITIZER_CAN_USE_ALLOCATOR64 is only used for 42-bit VMA, so there
+// is no need for different values for different VMA sizes.
+const uptr AllocatorSize = 0x10000000000ULL;  // 1T.
+# else
+const uptr AllocatorSize = 0x40000000000ULL;  // 4T.
+# endif
+typedef DefaultSizeClassMap SizeClassMap;
+struct AP {
+  static const uptr kSpaceBeg = AllocatorSpace;
+  static const uptr kSpaceSize = AllocatorSize;
+  static const uptr kMetadataSize = 0;
+  typedef __scudo::SizeClassMap SizeClassMap;
+  typedef NoOpMapUnmapCallback MapUnmapCallback;
+  static const uptr kFlags =
+      SizeClassAllocator64FlagMasks::kRandomShuffleChunks;
+};
+typedef SizeClassAllocator64<AP> PrimaryAllocator;
+#else
+// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the
+// security improvements brought to the 64-bit one. This makes the 32-bit
+// version of Scudo slightly less toughened.
+static const uptr RegionSizeLog = 20;
+static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog;
+# if SANITIZER_WORDSIZE == 32
+typedef FlatByteMap<NumRegions> ByteMap;
+# elif SANITIZER_WORDSIZE == 64
+typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap;
+# endif  // SANITIZER_WORDSIZE
+typedef DefaultSizeClassMap SizeClassMap;
+typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap,
+    RegionSizeLog, ByteMap> PrimaryAllocator;
+#endif  // SANITIZER_CAN_USE_ALLOCATOR64
+
+#include "scudo_allocator_secondary.h"
+
+typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
+typedef ScudoLargeMmapAllocator SecondaryAllocator;
+typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
+    ScudoBackendAllocator;
+
+ScudoBackendAllocator &getBackendAllocator();
+
+void initScudo();
 void initAllocator(const AllocatorOptions &options);
-void drainQuarantine();
 
 void *scudoMalloc(uptr Size, AllocType Type);
 void scudoFree(void *Ptr, AllocType Type);
@@ -97,8 +144,6 @@
 void *scudoAlignedAlloc(uptr Alignment, uptr Size);
 uptr scudoMallocUsableSize(void *Ptr);
 
-#include "scudo_allocator_secondary.h"
-
 }  // namespace __scudo
 
 #endif  // SCUDO_ALLOCATOR_H_
Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -15,56 +15,16 @@
 //===----------------------------------------------------------------------===//
 
 #include "scudo_allocator.h"
+#include "scudo_thread.h"
 #include "scudo_utils.h"
 
 #include "sanitizer_common/sanitizer_allocator_interface.h"
 #include "sanitizer_common/sanitizer_quarantine.h"
 
-#include <limits.h>
-#include <pthread.h>
-
-#include <cstring>
+#include <cstring>
 
 namespace __scudo {
 
-#if SANITIZER_CAN_USE_ALLOCATOR64
-const uptr AllocatorSpace = ~0ULL;
-const uptr AllocatorSize = 0x40000000000ULL;
-typedef DefaultSizeClassMap SizeClassMap;
-struct AP {
-
static const uptr kSpaceBeg = AllocatorSpace; - static const uptr kSpaceSize = AllocatorSize; - static const uptr kMetadataSize = 0; - typedef __scudo::SizeClassMap SizeClassMap; - typedef NoOpMapUnmapCallback MapUnmapCallback; - static const uptr kFlags = - SizeClassAllocator64FlagMasks::kRandomShuffleChunks; -}; -typedef SizeClassAllocator64 PrimaryAllocator; -#else -// Currently, the 32-bit Sanitizer allocator has not yet benefited from all the -// security improvements brought to the 64-bit one. This makes the 32-bit -// version of Scudo slightly less toughened. -static const uptr RegionSizeLog = 20; -static const uptr NumRegions = SANITIZER_MMAP_RANGE_SIZE >> RegionSizeLog; -# if SANITIZER_WORDSIZE == 32 -typedef FlatByteMap ByteMap; -# elif SANITIZER_WORDSIZE == 64 -typedef TwoLevelByteMap<(NumRegions >> 12), 1 << 12> ByteMap; -# endif // SANITIZER_WORDSIZE -typedef DefaultSizeClassMap SizeClassMap; -typedef SizeClassAllocator32<0, SANITIZER_MMAP_RANGE_SIZE, 0, SizeClassMap, - RegionSizeLog, ByteMap> PrimaryAllocator; -#endif // SANITIZER_CAN_USE_ALLOCATOR64 - -typedef SizeClassAllocatorLocalCache AllocatorCache; -typedef ScudoLargeMmapAllocator SecondaryAllocator; -typedef CombinedAllocator - ScudoAllocator; - -static ScudoAllocator &getAllocator(); - -static thread_local Xorshift128Plus Prng; // Global static cookie, initialized at start-up. static uptr Cookie; @@ -101,9 +61,10 @@ // Returns the usable size for a chunk, meaning the amount of bytes from the // beginning of the user data to the end of the backend allocated chunk. uptr getUsableSize(UnpackedHeader *Header) { - uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header)); + uptr Size = + getBackendAllocator().GetActuallyAllocatedSize(getAllocBeg(Header)); if (Size == 0) - return Size; + return 0; return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog); } @@ -120,7 +81,8 @@ return static_cast(Crc); } - // Checks the validity of a chunk by verifying its checksum. + // Checks the validity of a chunk by verifying its checksum. This check + // doesn't incur termination in case of an invalid chunk. bool isValid() { UnpackedHeader NewUnpackedHeader; const AtomicPackedHeader *AtomicHeader = @@ -130,13 +92,27 @@ return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader)); } + // Nulls out a chunk header. When returning the chunk to the backend, there + // is no need to store a valid ChunkAvailable header, as this would be + // computationally expensive. Zero-ing out serves the same purpose by making + // the header invalid. In the extremely rare event where 0 would be a valid + // checksum for the chunk, the state of the chunk is ChunkAvailable anyway. + COMPILER_CHECK(ChunkAvailable == 0); + void eraseHeader() { + PackedHeader NullPackedHeader = 0; + AtomicPackedHeader *AtomicHeader = + reinterpret_cast(this); + atomic_store_relaxed(AtomicHeader, NullPackedHeader); + } + // Loads and unpacks the header, verifying the checksum in the process. 
void loadHeader(UnpackedHeader *NewUnpackedHeader) const { const AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader); *NewUnpackedHeader = bit_cast(NewPackedHeader); - if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) { + if (UNLIKELY(NewUnpackedHeader->Checksum + != computeChecksum(NewUnpackedHeader))) { dieWithMessage("ERROR: corrupted chunk header at address %p\n", this); } } @@ -160,10 +136,10 @@ PackedHeader OldPackedHeader = bit_cast(*OldUnpackedHeader); AtomicPackedHeader *AtomicHeader = reinterpret_cast(this); - if (!atomic_compare_exchange_strong(AtomicHeader, - &OldPackedHeader, - NewPackedHeader, - memory_order_relaxed)) { + if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader, + &OldPackedHeader, + NewPackedHeader, + memory_order_relaxed))) { dieWithMessage("ERROR: race on chunk header at address %p\n", this); } } @@ -171,30 +147,7 @@ static bool ScudoInitIsRunning = false; -static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT; -static pthread_key_t PThreadKey; - -static thread_local bool ThreadInited = false; -static thread_local bool ThreadTornDown = false; -static thread_local AllocatorCache Cache; - -static void teardownThread(void *p) { - uptr v = reinterpret_cast(p); - // The glibc POSIX thread-local-storage deallocation routine calls user - // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS. - // We want to be called last since other destructors might call free and the - // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the - // quarantine and swallowing the cache. - if (v < PTHREAD_DESTRUCTOR_ITERATIONS) { - pthread_setspecific(PThreadKey, reinterpret_cast(v + 1)); - return; - } - drainQuarantine(); - getAllocator().DestroyCache(&Cache); - ThreadTornDown = true; -} - -static void initInternal() { +void initScudo() { SanitizerToolName = "Scudo"; CHECK(!ScudoInitIsRunning && "Scudo init calls itself!"); ScudoInitIsRunning = true; @@ -205,28 +158,13 @@ } initFlags(); - AllocatorOptions Options; Options.setFrom(getFlags(), common_flags()); initAllocator(Options); - MaybeStartBackgroudThread(); - ScudoInitIsRunning = false; } -static void initGlobal() { - pthread_key_create(&PThreadKey, teardownThread); - initInternal(); -} - -static void NOINLINE initThread() { - pthread_once(&GlobalInited, initGlobal); - pthread_setspecific(PThreadKey, reinterpret_cast(1)); - getAllocator().InitCache(&Cache); - ThreadInited = true; -} - struct QuarantineCallback { explicit QuarantineCallback(AllocatorCache *Cache) : Cache_(Cache) {} @@ -235,38 +173,45 @@ void Recycle(ScudoChunk *Chunk) { UnpackedHeader Header; Chunk->loadHeader(&Header); - if (Header.State != ChunkQuarantine) { + if (UNLIKELY(Header.State != ChunkQuarantine)) { dieWithMessage("ERROR: invalid chunk state when recycling address %p\n", Chunk); } + Chunk->eraseHeader(); void *Ptr = Chunk->getAllocBeg(&Header); - getAllocator().Deallocate(Cache_, Ptr); + getBackendAllocator().Deallocate(Cache_, Ptr); } /// Internal quarantine allocation and deallocation functions. void *Allocate(uptr Size) { - // The internal quarantine memory cannot be protected by us. But the only - // structures allocated are QuarantineBatch, that are 8KB for x64. So we - // will use mmap for those, and given that Deallocate doesn't pass a size - // in, we enforce the size of the allocation to be sizeof(QuarantineBatch). 
- // TODO(kostyak): switching to mmap impacts greatly performances, we have - // to find another solution - // CHECK_EQ(Size, sizeof(QuarantineBatch)); - // return MmapOrDie(Size, "QuarantineBatch"); - return getAllocator().Allocate(Cache_, Size, 1, false); + // TODO(kostyak): figure out the best way to protect the batches. + return getBackendAllocator().Allocate(Cache_, Size, MinAlignment); } void Deallocate(void *Ptr) { - // UnmapOrDie(Ptr, sizeof(QuarantineBatch)); - getAllocator().Deallocate(Cache_, Ptr); + getBackendAllocator().Deallocate(Cache_, Ptr); } AllocatorCache *Cache_; }; typedef Quarantine ScudoQuarantine; -typedef ScudoQuarantine::Cache QuarantineCache; -static thread_local QuarantineCache ThreadQuarantineCache; +typedef ScudoQuarantine::Cache ScudoQuarantineCache; +COMPILER_CHECK(sizeof(ScudoQuarantineCache) + <= sizeof(ScudoThreadContext::QuarantineCachePlaceHolder)); + +AllocatorCache *getAllocatorCache(ScudoThreadContext *ThreadContext) { + return &ThreadContext->Cache; +} + +ScudoQuarantineCache *getQuarantineCache(ScudoThreadContext *ThreadContext) { + return reinterpret_cast< + ScudoQuarantineCache *>(ThreadContext->QuarantineCachePlaceHolder); +} + +Xorshift128Plus *getPrng(ScudoThreadContext *ThreadContext) { + return &ThreadContext->Prng; +} void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) { MayReturnNull = cf->allocator_may_return_null; @@ -288,11 +233,11 @@ f->ZeroContents = ZeroContents; } -struct Allocator { +struct ScudoAllocator { static const uptr MaxAllowedMallocSize = FIRST_32_SECOND_64(2UL << 30, 1ULL << 40); - ScudoAllocator BackendAllocator; + ScudoBackendAllocator BackendAllocator; ScudoQuarantine AllocatorQuarantine; // The fallback caches are used when the thread local caches have been @@ -300,13 +245,14 @@ // be accessed by different threads. StaticSpinMutex FallbackMutex; AllocatorCache FallbackAllocatorCache; - QuarantineCache FallbackQuarantineCache; + ScudoQuarantineCache FallbackQuarantineCache; + Xorshift128Plus GlobalPrng; bool DeallocationTypeMismatch; bool ZeroContents; bool DeleteSizeMismatch; - explicit Allocator(LinkerInitialized) + explicit ScudoAllocator(LinkerInitialized) : AllocatorQuarantine(LINKER_INITIALIZED), FallbackQuarantineCache(LINKER_INITIALIZED) {} @@ -348,38 +294,38 @@ AllocatorQuarantine.Init( static_cast(Options.QuarantineSizeMb) << 20, static_cast(Options.ThreadLocalQuarantineSizeKb) << 10); + FallbackMutex.Init(); BackendAllocator.InitCache(&FallbackAllocatorCache); - Cookie = Prng.Next(); + GlobalPrng.initFromURandom(); + Cookie = GlobalPrng.getNext(); } - // Helper function that checks for a valid Scudo chunk. + // Helper function that checks for a valid Scudo chunk. nullptr isn't. bool isValidPointer(const void *UserPtr) { - if (UNLIKELY(!ThreadInited)) - initThread(); - uptr ChunkBeg = reinterpret_cast(UserPtr); - if (!IsAligned(ChunkBeg, MinAlignment)) { + initThreadMaybe(); + if (!UserPtr) return false; - } - ScudoChunk *Chunk = - reinterpret_cast(ChunkBeg - AlignedChunkHeaderSize); - return Chunk->isValid(); + uptr UserBeg = reinterpret_cast(UserPtr); + if (!IsAligned(UserBeg, MinAlignment)) + return false; + return getScudoChunk(UserBeg)->isValid(); } // Allocates a chunk. 
- void *allocate(uptr Size, uptr Alignment, AllocType Type) { - if (UNLIKELY(!ThreadInited)) - initThread(); - if (!IsPowerOfTwo(Alignment)) { + void *allocate(uptr Size, uptr Alignment, AllocType Type, + bool ForceZeroContents = false) { + initThreadMaybe(); + if (UNLIKELY(!IsPowerOfTwo(Alignment))) { dieWithMessage("ERROR: alignment is not a power of 2\n"); } if (Alignment > MaxAlignment) return BackendAllocator.ReturnNullOrDieOnBadRequest(); if (Alignment < MinAlignment) Alignment = MinAlignment; - if (Size == 0) - Size = 1; if (Size >= MaxAllowedMallocSize) return BackendAllocator.ReturnNullOrDieOnBadRequest(); + if (Size == 0) + Size = 1; uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize; if (Alignment > MinAlignment) @@ -394,14 +340,23 @@ // combined allocator to accommodate the situation. bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment); + uptr Salt; void *Ptr; - if (LIKELY(!ThreadTornDown)) { - Ptr = BackendAllocator.Allocate(&Cache, NeededSize, - FromPrimary ? MinAlignment : Alignment); + uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment; + ScudoThreadContext *ThreadContext = getCurrentThreadContext(); + // Based on what backs the ThreadContext, the locking mechanism differ (it + // might not be locked at all for thread_local variables). + if (ThreadContext) { + ThreadContext->Lock(); + Salt = getPrng(ThreadContext)->getNext(); + Ptr = BackendAllocator.Allocate(getAllocatorCache(ThreadContext), + NeededSize, AllocationAlignment); + ThreadContext->Unlock(); } else { - SpinMutexLock l(&FallbackMutex); - Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize, - FromPrimary ? MinAlignment : Alignment); + SpinMutexLock Lock(&FallbackMutex); + Salt = GlobalPrng.getNext(); + Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, + NeededSize, AllocationAlignment); } if (!Ptr) return BackendAllocator.ReturnNullOrDieOnOOM(); @@ -419,109 +374,131 @@ uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize( reinterpret_cast(AllocBeg)); // If requested, we will zero out the entire contents of the returned chunk. - if (ZeroContents && FromPrimary) + if ((ForceZeroContents || ZeroContents) && FromPrimary) memset(Ptr, 0, ActuallyAllocatedSize); - uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize; - if (!IsAligned(ChunkBeg, Alignment)) - ChunkBeg = RoundUpTo(ChunkBeg, Alignment); - CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize); - ScudoChunk *Chunk = - reinterpret_cast(ChunkBeg - AlignedChunkHeaderSize); + uptr UserBeg = AllocBeg + AlignedChunkHeaderSize; + if (!IsAligned(UserBeg, Alignment)) + UserBeg = RoundUpTo(UserBeg, Alignment); + CHECK_LE(UserBeg + Size, AllocBeg + NeededSize); UnpackedHeader Header = {}; Header.State = ChunkAllocated; - uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg; + uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg; Header.Offset = Offset >> MinAlignmentLog; Header.AllocType = Type; Header.UnusedBytes = ActuallyAllocatedSize - Offset - AlignedChunkHeaderSize - Size; - Header.Salt = static_cast(Prng.Next()); - Chunk->storeHeader(&Header); - void *UserPtr = reinterpret_cast(ChunkBeg); - // TODO(kostyak): hooks sound like a terrible idea security wise but might - // be needed for things to work properly? + Header.Salt = static_cast(Salt); + getScudoChunk(UserBeg)->storeHeader(&Header); + void *UserPtr = reinterpret_cast(UserBeg); // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size); return UserPtr; } + // Place a chunk in the quarantine. 
In the event of a zero-sized quarantine, + // we directly deallocate the chunk, otherwise the flow would lead to the + // chunk being checksummed twice, once before Put and once in Recycle, with + // no additional security value. + void quarantineOrDeallocateChunk(ScudoChunk *Chunk, UnpackedHeader *Header, + uptr Size) { + bool BypassQuarantine = (AllocatorQuarantine.GetCacheSize() == 0); + ScudoThreadContext *ThreadContext = getCurrentThreadContext(); + if (BypassQuarantine) { + Chunk->eraseHeader(); + void *Ptr = Chunk->getAllocBeg(Header); + if (ThreadContext) { + ThreadContext->Lock(); + getBackendAllocator().Deallocate(getAllocatorCache(ThreadContext), Ptr); + ThreadContext->Unlock(); + } else { + SpinMutexLock Lock(&FallbackMutex); + getBackendAllocator().Deallocate(&FallbackAllocatorCache, Ptr); + } + } else { + UnpackedHeader NewHeader = *Header; + NewHeader.State = ChunkQuarantine; + Chunk->compareExchangeHeader(&NewHeader, Header); + if (ThreadContext) { + ThreadContext->Lock(); + AllocatorQuarantine.Put(getQuarantineCache(ThreadContext), + QuarantineCallback( + getAllocatorCache(ThreadContext)), + Chunk, Size); + ThreadContext->Unlock(); + } else { + SpinMutexLock Lock(&FallbackMutex); + AllocatorQuarantine.Put(&FallbackQuarantineCache, + QuarantineCallback(&FallbackAllocatorCache), + Chunk, Size); + } + } + } + // Deallocates a Chunk, which means adding it to the delayed free list (or - // Quarantine). + // Quarantine) or returning it to the backend. void deallocate(void *UserPtr, uptr DeleteSize, AllocType Type) { - if (UNLIKELY(!ThreadInited)) - initThread(); - // TODO(kostyak): see hook comment above + initThreadMaybe(); // if (&__sanitizer_free_hook) __sanitizer_free_hook(UserPtr); if (!UserPtr) return; - uptr ChunkBeg = reinterpret_cast(UserPtr); - if (!IsAligned(ChunkBeg, MinAlignment)) { + uptr UserBeg = reinterpret_cast(UserPtr); + if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { dieWithMessage("ERROR: attempted to deallocate a chunk not properly " "aligned at address %p\n", UserPtr); } - ScudoChunk *Chunk = - reinterpret_cast(ChunkBeg - AlignedChunkHeaderSize); + ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); - if (OldHeader.State != ChunkAllocated) { + if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when deallocating address " "%p\n", UserPtr); } - uptr UsableSize = Chunk->getUsableSize(&OldHeader); - UnpackedHeader NewHeader = OldHeader; - NewHeader.State = ChunkQuarantine; - Chunk->compareExchangeHeader(&NewHeader, &OldHeader); if (DeallocationTypeMismatch) { // The deallocation type has to match the allocation one. - if (NewHeader.AllocType != Type) { + if (OldHeader.AllocType != Type) { // With the exception of memalign'd Chunks, that can be still be free'd. 
- if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) { + if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) { dieWithMessage("ERROR: allocation type mismatch on address %p\n", - Chunk); + UserPtr); } } } - uptr Size = UsableSize - OldHeader.UnusedBytes; + uptr UsableSize = Chunk->getUsableSize(&OldHeader); if (DeleteSizeMismatch) { + uptr Size = UsableSize - OldHeader.UnusedBytes; if (DeleteSize && DeleteSize != Size) { dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n", - Chunk); + UserPtr); } } - - if (LIKELY(!ThreadTornDown)) { - AllocatorQuarantine.Put(&ThreadQuarantineCache, - QuarantineCallback(&Cache), Chunk, UsableSize); - } else { - SpinMutexLock l(&FallbackMutex); - AllocatorQuarantine.Put(&FallbackQuarantineCache, - QuarantineCallback(&FallbackAllocatorCache), - Chunk, UsableSize); - } + quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize); } // Reallocates a chunk. We can save on a new allocation if the new requested // size still fits in the chunk. void *reallocate(void *OldPtr, uptr NewSize) { - if (UNLIKELY(!ThreadInited)) - initThread(); - uptr ChunkBeg = reinterpret_cast(OldPtr); - ScudoChunk *Chunk = - reinterpret_cast(ChunkBeg - AlignedChunkHeaderSize); + initThreadMaybe(); + uptr UserBeg = reinterpret_cast(OldPtr); + if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) { + dieWithMessage("ERROR: attempted to reallocate a chunk not properly " + "aligned at address %p\n", OldPtr); + } + ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader OldHeader; Chunk->loadHeader(&OldHeader); - if (OldHeader.State != ChunkAllocated) { + if (UNLIKELY(OldHeader.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when reallocating address " "%p\n", OldPtr); } - uptr Size = Chunk->getUsableSize(&OldHeader); - if (OldHeader.AllocType != FromMalloc) { + if (UNLIKELY(OldHeader.AllocType != FromMalloc)) { dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n", - Chunk); + OldPtr); } - UnpackedHeader NewHeader = OldHeader; + uptr UsableSize = Chunk->getUsableSize(&OldHeader); // The new size still fits in the current chunk. - if (NewSize <= Size) { - NewHeader.UnusedBytes = Size - NewSize; + if (NewSize <= UsableSize) { + UnpackedHeader NewHeader = OldHeader; + NewHeader.UnusedBytes = UsableSize - NewSize; Chunk->compareExchangeHeader(&NewHeader, &OldHeader); return OldPtr; } @@ -529,36 +506,28 @@ // old one. void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc); if (NewPtr) { - uptr OldSize = Size - OldHeader.UnusedBytes; + uptr OldSize = UsableSize - OldHeader.UnusedBytes; memcpy(NewPtr, OldPtr, Min(NewSize, OldSize)); - NewHeader.State = ChunkQuarantine; - Chunk->compareExchangeHeader(&NewHeader, &OldHeader); - if (LIKELY(!ThreadTornDown)) { - AllocatorQuarantine.Put(&ThreadQuarantineCache, - QuarantineCallback(&Cache), Chunk, Size); - } else { - SpinMutexLock l(&FallbackMutex); - AllocatorQuarantine.Put(&FallbackQuarantineCache, - QuarantineCallback(&FallbackAllocatorCache), - Chunk, Size); - } + quarantineOrDeallocateChunk(Chunk, &OldHeader, UsableSize); } return NewPtr; } + ScudoChunk *getScudoChunk(uptr UserBeg) { + return reinterpret_cast(UserBeg - AlignedChunkHeaderSize); + } + // Helper function that returns the actual usable size of a chunk. 
uptr getUsableSize(const void *Ptr) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); if (!Ptr) return 0; - uptr ChunkBeg = reinterpret_cast(Ptr); - ScudoChunk *Chunk = - reinterpret_cast(ChunkBeg - AlignedChunkHeaderSize); + uptr UserBeg = reinterpret_cast(Ptr); + ScudoChunk *Chunk = getScudoChunk(UserBeg); UnpackedHeader Header; Chunk->loadHeader(&Header); // Getting the usable size of a chunk only makes sense if it's allocated. - if (Header.State != ChunkAllocated) { + if (UNLIKELY(Header.State != ChunkAllocated)) { dieWithMessage("ERROR: invalid chunk state when sizing address %p\n", Ptr); } @@ -566,35 +535,31 @@ } void *calloc(uptr NMemB, uptr Size) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); uptr Total = NMemB * Size; - if (Size != 0 && Total / Size != NMemB) // Overflow check + if (Size != 0 && Total / Size != NMemB) // Overflow check return BackendAllocator.ReturnNullOrDieOnBadRequest(); - void *Ptr = allocate(Total, MinAlignment, FromMalloc); - // If ZeroContents, the content of the chunk has already been zero'd out. - if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr)) - memset(Ptr, 0, getUsableSize(Ptr)); - return Ptr; + return allocate(Total, MinAlignment, FromMalloc, true); } - void drainQuarantine() { - AllocatorQuarantine.Drain(&ThreadQuarantineCache, - QuarantineCallback(&Cache)); + void commitBack(ScudoThreadContext *ThreadContext) { + AllocatorCache *Cache = getAllocatorCache(ThreadContext); + AllocatorQuarantine.Drain(getQuarantineCache(ThreadContext), + QuarantineCallback(Cache)); + BackendAllocator.DestroyCache(Cache); } uptr getStats(AllocatorStat StatType) { - if (UNLIKELY(!ThreadInited)) - initThread(); + initThreadMaybe(); uptr stats[AllocatorStatCount]; BackendAllocator.GetStats(stats); return stats[StatType]; } }; -static Allocator Instance(LINKER_INITIALIZED); +static ScudoAllocator Instance(LINKER_INITIALIZED); -static ScudoAllocator &getAllocator() { +ScudoBackendAllocator &getBackendAllocator() { return Instance.BackendAllocator; } @@ -602,8 +567,8 @@ Instance.init(Options); } -void drainQuarantine() { - Instance.drainQuarantine(); +void ScudoThreadContext::commitBack() { + Instance.commitBack(this); } void *scudoMalloc(uptr Size, AllocType Type) { Index: lib/scudo/scudo_allocator_secondary.h =================================================================== --- lib/scudo/scudo_allocator_secondary.h +++ lib/scudo/scudo_allocator_secondary.h @@ -88,8 +88,11 @@ // The primary adds the whole class size to the stats when allocating a // chunk, so we will do something similar here. But we will not account for // the guard pages. 
- Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize); - Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize); + { + SpinMutexLock l(&StatsMutex); + Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize); + Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize); + } return reinterpret_cast(UserBeg); } @@ -112,8 +115,11 @@ void Deallocate(AllocatorStats *Stats, void *Ptr) { SecondaryHeader *Header = getHeader(Ptr); - Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize); - Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize); + { + SpinMutexLock l(&StatsMutex); + Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize); + Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize); + } UnmapOrDie(reinterpret_cast(Header->MapBeg), Header->MapSize); } @@ -127,7 +133,7 @@ uptr GetActuallyAllocatedSize(void *Ptr) { SecondaryHeader *Header = getHeader(Ptr); - // Deduct PageSize as MapEnd includes the trailing guard page. + // Deduct PageSize as MapSize includes the trailing guard page. uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize; return MapEnd - reinterpret_cast(Ptr); } @@ -182,6 +188,7 @@ const uptr SecondaryHeaderSize = sizeof(SecondaryHeader); const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize; uptr PageSize; + SpinMutex StatsMutex; atomic_uint8_t MayReturnNull; }; Index: lib/scudo/scudo_thread.h =================================================================== --- /dev/null +++ lib/scudo/scudo_thread.h @@ -0,0 +1,48 @@ +//===-- scudo_thread.h ------------------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo thread local structure definition. +/// Implementation varies based on the thread local primitives offered by the +/// underlying platform. +/// +//===----------------------------------------------------------------------===// + +#ifndef SCUDO_THREAD_H_ +#define SCUDO_THREAD_H_ + +#include "scudo_allocator.h" +#include "scudo_utils.h" + +#include "sanitizer_common/sanitizer_platform.h" + +namespace __scudo { + +struct ScudoThreadContext { + public: + StaticSpinMutex Mutex; + AllocatorCache Cache; + Xorshift128Plus Prng; + uptr QuarantineCachePlaceHolder[4]; + void init() { + Mutex.Init(); + getBackendAllocator().InitCache(&Cache); + Prng.initFromURandom(); + memset(QuarantineCachePlaceHolder, 0, sizeof(QuarantineCachePlaceHolder)); + } + void commitBack(); + void Lock(); + void Unlock(); +}; + +void initThreadMaybe(); +ScudoThreadContext *getCurrentThreadContext(); + +} // namespace __scudo + +#endif // SCUDO_THREAD_H_ Index: lib/scudo/scudo_thread_android.cpp =================================================================== --- /dev/null +++ lib/scudo/scudo_thread_android.cpp @@ -0,0 +1,74 @@ +//===-- scudo_thread_android.cpp --------------------------------*- C++ -*-===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +/// +/// Scudo thread local implementation for Android, as it currently only supports +/// thread_local via emutls, which doesn't work for our purposes. 
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX && (SANITIZER_ANDROID || defined(SCUDO_NO_TLS))
+
+#include "scudo_thread.h"
+
+#include "sanitizer_common/sanitizer_atomic.h"
+
+#include <pthread.h>
+
+namespace __scudo {
+
+static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+// For Android, and possibly other platforms with stronger memory constraints,
+// we can't necessarily afford a context per thread. The alternative adopted
+// here is a fixed set of contexts (the number being defined at compile time),
+// where contexts are assigned to new threads in a round-robin fashion.
+
+#ifndef SCUDO_N_CONTEXTS
+# define SCUDO_N_CONTEXTS 4
+#endif
+
+static atomic_uint32_t ThreadContextCurrentIndex;
+static ScudoThreadContext *ThreadContexts;
+
+static void initOnce() {
+  CHECK_EQ(pthread_key_create(&PThreadKey, NULL), 0);
+  initScudo();
+  ThreadContexts = reinterpret_cast<ScudoThreadContext *>(
+      MmapOrDie(sizeof(ScudoThreadContext) * SCUDO_N_CONTEXTS, __func__));
+  for (int i = 0; i < SCUDO_N_CONTEXTS; i++)
+    ThreadContexts[i].init();
+}
+
+void initThreadMaybe() {
+  pthread_once(&GlobalInited, initOnce);
+  if (pthread_getspecific(PThreadKey) == NULL) {
+    u32 Index = atomic_fetch_add(&ThreadContextCurrentIndex, 1,
+                                 memory_order_relaxed);
+    ScudoThreadContext *ThreadContext =
+        &ThreadContexts[Index % SCUDO_N_CONTEXTS];
+    pthread_setspecific(PThreadKey, ThreadContext);
+  }
+}
+
+ScudoThreadContext *getCurrentThreadContext() {
+  ScudoThreadContext *ThreadContext =
+      reinterpret_cast<ScudoThreadContext *>(pthread_getspecific(PThreadKey));
+  CHECK(ThreadContext);
+  return ThreadContext;
+}
+
+void ScudoThreadContext::Lock() { Mutex.Lock(); }
+void ScudoThreadContext::Unlock() { Mutex.Unlock(); }
+
+}  // namespace __scudo
+
+#endif  // SANITIZER_LINUX && (SANITIZER_ANDROID || defined(SCUDO_NO_TLS))
Index: lib/scudo/scudo_thread_linux.cpp
===================================================================
--- /dev/null
+++ lib/scudo/scudo_thread_linux.cpp
@@ -0,0 +1,73 @@
+//===-- scudo_thread_linux.cpp ----------------------------------*- C++ -*-===//
+//
+// The LLVM Compiler Infrastructure
+//
+// This file is distributed under the University of Illinois Open Source
+// License. See LICENSE.TXT for details.
+//
+//===----------------------------------------------------------------------===//
+///
+/// Scudo thread local implementation for platforms supporting thread_local.
+///
+//===----------------------------------------------------------------------===//
+
+#include "sanitizer_common/sanitizer_platform.h"
+
+#if SANITIZER_LINUX && !SANITIZER_ANDROID && !defined(SCUDO_NO_TLS)
+
+#include "scudo_thread.h"
+
+#include <limits.h>
+#include <pthread.h>
+
+namespace __scudo {
+
+static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
+static pthread_key_t PThreadKey;
+
+static thread_local bool ThreadInited = false;
+static thread_local bool ThreadTornDown = false;
+static thread_local ScudoThreadContext ThreadLocalContext;
+
+static void teardownThread(void *Ptr) {
+  uptr Iteration = reinterpret_cast<uptr>(Ptr);
+  // The glibc POSIX thread-local-storage deallocation routine calls user
+  // provided destructors in a loop of PTHREAD_DESTRUCTOR_ITERATIONS.
+  // We want to be called last since other destructors might call free and the
+  // like, so we wait until PTHREAD_DESTRUCTOR_ITERATIONS before draining the
+  // quarantine and swallowing the cache.
+ if (Iteration < PTHREAD_DESTRUCTOR_ITERATIONS) { + pthread_setspecific(PThreadKey, reinterpret_cast(Iteration + 1)); + return; + } + ThreadLocalContext.commitBack(); + ThreadTornDown = true; +} + + +static void initOnce() { + CHECK_EQ(pthread_key_create(&PThreadKey, teardownThread), 0); + initScudo(); +} + +void initThreadMaybe() { + if (LIKELY(ThreadInited)) + return; + pthread_once(&GlobalInited, initOnce); + pthread_setspecific(PThreadKey, reinterpret_cast(1)); + ThreadLocalContext.init(); + ThreadInited = true; +} + +ScudoThreadContext *getCurrentThreadContext() { + if (ThreadTornDown) + return nullptr; + return &ThreadLocalContext; +} + +void ScudoThreadContext::Lock() {} +void ScudoThreadContext::Unlock() {} + +} // namespace __scudo + +#endif // SANITIZER_LINUX && !SANITIZER_ANDROID Index: lib/scudo/scudo_utils.h =================================================================== --- lib/scudo/scudo_utils.h +++ lib/scudo/scudo_utils.h @@ -14,10 +14,10 @@ #ifndef SCUDO_UTILS_H_ #define SCUDO_UTILS_H_ -#include - #include "sanitizer_common/sanitizer_common.h" +#include + namespace __scudo { template @@ -40,8 +40,8 @@ // The state (128 bits) will be stored in thread local storage. struct Xorshift128Plus { public: - Xorshift128Plus(); - u64 Next() { + void initFromURandom(); + u64 getNext() { u64 x = State[0]; const u64 y = State[1]; State[0] = y; @@ -58,7 +58,59 @@ CRC32Hardware = 1, }; -u32 computeSoftwareCRC32(u32 Crc, uptr Data); +const static u32 CRC32Table[] = { + 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, + 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, + 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, + 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, + 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, + 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, + 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, + 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, + 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, + 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, + 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, + 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, + 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, + 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, + 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, + 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, + 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, + 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, + 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, + 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, + 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, + 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, + 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, + 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, + 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, + 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, + 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, + 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 
0x3fb506dd, 0x48b2364b, + 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, + 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, + 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, + 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, + 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, + 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, + 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, + 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, + 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, + 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, + 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, + 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, + 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, + 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, + 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d +}; + +INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) { + for (uptr i = 0; i < sizeof(Data); i++) { + Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8); + Data >>= 8; + } + return Crc; +} } // namespace __scudo Index: lib/scudo/scudo_utils.cpp =================================================================== --- lib/scudo/scudo_utils.cpp +++ lib/scudo/scudo_utils.cpp @@ -153,64 +153,9 @@ } } -// Default constructor for Xorshift128Plus seeds the state with /dev/urandom. // TODO(kostyak): investigate using getrandom() if available. -Xorshift128Plus::Xorshift128Plus() { +void Xorshift128Plus::initFromURandom() { fillRandom(reinterpret_cast(State), sizeof(State)); } -const static u32 CRC32Table[] = { - 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f, - 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988, - 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2, - 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7, - 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9, - 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172, - 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c, - 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59, - 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423, - 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924, - 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106, - 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433, - 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d, - 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e, - 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950, - 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65, - 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7, - 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0, - 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa, - 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f, - 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81, - 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a, - 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84, - 0x0d6d6a3e, 0x7a6a5aa8, 
0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1, - 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb, - 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc, - 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e, - 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b, - 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55, - 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236, - 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28, - 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d, - 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f, - 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38, - 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242, - 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777, - 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69, - 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2, - 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc, - 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9, - 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693, - 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94, - 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d -}; - -u32 computeSoftwareCRC32(u32 Crc, uptr Data) { - for (uptr i = 0; i < sizeof(Data); i++) { - Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8); - Data >>= 8; - } - return Crc; -} - } // namespace __scudo
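
Note on the shared-context model introduced by scudo_thread_android.cpp: since
the platform cannot rely on a dedicated thread_local context per thread, the
patch binds each thread to one of SCUDO_N_CONTEXTS shared contexts, assigned
round-robin on first use and protected by a per-context mutex. Below is a
minimal standalone sketch of that scheme (not part of the patch), using plain
pthread and <atomic> instead of the sanitizer primitives; the names
kNumContexts, Context and getContext are illustrative only.

// Standalone sketch: round-robin assignment of threads to a fixed context pool.
#include <atomic>
#include <cstdio>
#include <mutex>
#include <pthread.h>

namespace {

constexpr int kNumContexts = 4;  // Compile-time pool size, like SCUDO_N_CONTEXTS.

struct Context {
  std::mutex Mutex;  // Stands in for ScudoThreadContext::Mutex.
  int Id = -1;
};

pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
pthread_key_t Key;
std::atomic<unsigned> NextIndex{0};
Context Contexts[kNumContexts];

void initOnce() {
  pthread_key_create(&Key, nullptr);
  for (int i = 0; i < kNumContexts; i++)
    Contexts[i].Id = i;
}

// Mirrors initThreadMaybe()/getCurrentThreadContext(): bind the calling thread
// to a context in round-robin order the first time it shows up.
Context *getContext() {
  pthread_once(&GlobalInited, initOnce);
  void *Specific = pthread_getspecific(Key);
  if (!Specific) {
    unsigned Index = NextIndex.fetch_add(1, std::memory_order_relaxed);
    Specific = &Contexts[Index % kNumContexts];
    pthread_setspecific(Key, Specific);
  }
  return static_cast<Context *>(Specific);
}

}  // namespace

int main() {
  // Contexts may be shared between threads, so every use is bracketed by the
  // context's own lock, matching ScudoThreadContext::Lock()/Unlock() in the
  // allocate and deallocate paths.
  Context *C = getContext();
  std::lock_guard<std::mutex> Guard(C->Mutex);
  std::printf("thread bound to context %d\n", C->Id);
  return 0;
}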
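Note on the header checksum that computeChecksum() and computeSoftwareCRC32()
support: each chunk header carries a 16-bit checksum computed from the global
Cookie and the header contents (which include a per-chunk Salt), so a corrupted
or forged header is caught when loadHeader() runs. The full input set of
computeChecksum() is not visible in this diff, so the sketch below (not part of
the patch) shows only one plausible construction: the headerChecksum() helper
and its field layout are hypothetical, the byte-at-a-time loop mirrors
computeSoftwareCRC32(), and the table is generated at runtime instead of being
stored as in scudo_utils.h.

// Standalone sketch: a CRC32-based 16-bit header checksum seeded with a cookie.
#include <cstdint>
#include <cstdio>

typedef uint16_t u16;
typedef uint32_t u32;
typedef uint64_t u64;
typedef uintptr_t uptr;

static u32 CRC32Table[256];

// Generates the standard CRC-32 table (polynomial 0xedb88320), yielding the
// same 256 constants that scudo_utils.h stores statically.
static void initCRC32Table() {
  for (u32 I = 0; I < 256; I++) {
    u32 Crc = I;
    for (int K = 0; K < 8; K++)
      Crc = (Crc & 1) ? 0xedb88320U ^ (Crc >> 1) : Crc >> 1;
    CRC32Table[I] = Crc;
  }
}

// Same loop shape as computeSoftwareCRC32(): fold one pointer-sized word into
// the running CRC, one byte at a time.
static u32 softwareCRC32(u32 Crc, uptr Data) {
  for (uptr I = 0; I < sizeof(Data); I++) {
    Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
    Data >>= 8;
  }
  return Crc;
}

// Hypothetical assembly of a header checksum: seed with the cookie, mix in the
// chunk address, then every word of the header (with its checksum field
// zeroed), and truncate to the 16 bits available in the packed header.
static u16 headerChecksum(u64 Cookie, uptr Ptr, const uptr *HeaderWords,
                          uptr NumWords) {
  u32 Crc = softwareCRC32(static_cast<u32>(Cookie), Ptr);
  for (uptr I = 0; I < NumWords; I++)
    Crc = softwareCRC32(Crc, HeaderWords[I]);
  return static_cast<u16>(Crc);
}

int main() {
  initCRC32Table();
  uptr Header[2] = {0x1badcafe, 0x0};  // Fake header words, checksum zeroed.
  u16 Checksum = headerChecksum(0xdeadbeefULL, 0x100000f0, Header, 2);
  std::printf("checksum: 0x%04x\n", Checksum);
  return 0;
}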