Index: lib/scudo/scudo_allocator.h
===================================================================
--- lib/scudo/scudo_allocator.h
+++ lib/scudo/scudo_allocator.h
@@ -41,8 +41,7 @@
 // using functions such as GetBlockBegin, that is fairly costly. Our first
 // implementation used the MetaData as well, which offers the advantage of
 // being stored away from the chunk itself, but accessing it was costly as
-// well. The header will be atomically loaded and stored using the 16-byte
-// primitives offered by the platform (likely requires cmpxchg16b support).
+// well. The header will be atomically loaded and stored.
 typedef u64 PackedHeader;
 struct UnpackedHeader {
   u64 Checksum : 16;
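The comment above describes Scudo's packed chunk header: all per-chunk metadata fits in a single u64 that is loaded and stored atomically, with a checksum guarding its integrity. A minimal standalone sketch of the idea follows; the field names mirror the patch, but the bit widths and the `Demo*` helper names are illustrative, not the actual layout.

```cpp
// Sketch of a 64-bit packed header: a bitfield struct the size of a u64,
// published and read through a single 64-bit atomic word.
#include <atomic>
#include <cstdint>
#include <cstring>

typedef uint64_t DemoPackedHeader;
struct DemoHeader {           // illustrative widths; they sum to 64 bits
  uint64_t Checksum : 16;
  uint64_t UnusedBytes : 20;
  uint64_t State : 2;
  uint64_t AllocType : 2;
  uint64_t Offset : 16;
  uint64_t Salt : 8;
};
static_assert(sizeof(DemoHeader) == sizeof(DemoPackedHeader),
              "header must fit in a single 64-bit word");

// Atomically publish a header: pack the bitfields into a u64, then store.
void storeHeader(std::atomic<DemoPackedHeader> *Where, const DemoHeader &H) {
  DemoPackedHeader Packed;
  memcpy(&Packed, &H, sizeof(Packed));  // the equivalent of bit_cast
  Where->store(Packed, std::memory_order_relaxed);
}

// Atomically read a header back and unpack it.
DemoHeader loadHeader(const std::atomic<DemoPackedHeader> *Where) {
  DemoPackedHeader Packed = Where->load(std::memory_order_relaxed);
  DemoHeader H;
  memcpy(&H, &Packed, sizeof(H));
  return H;
}
```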
Index: lib/scudo/scudo_allocator.cpp
===================================================================
--- lib/scudo/scudo_allocator.cpp
+++ lib/scudo/scudo_allocator.cpp
@@ -22,8 +22,7 @@
 #include <limits.h>
 #include <pthread.h>
-
-#include <cstring>
+#include <string.h>
 
 namespace __scudo {
@@ -60,9 +59,9 @@
 typedef SizeClassAllocatorLocalCache<PrimaryAllocator> AllocatorCache;
 typedef ScudoLargeMmapAllocator SecondaryAllocator;
 typedef CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
-  ScudoAllocator;
+  ScudoBackendAllocator;
 
-static ScudoAllocator &getAllocator();
+static ScudoBackendAllocator &getBackendAllocator();
 
 static thread_local Xorshift128Plus Prng;
 // Global static cookie, initialized at start-up.
@@ -101,9 +100,10 @@
   // Returns the usable size for a chunk, meaning the amount of bytes from the
   // beginning of the user data to the end of the backend allocated chunk.
   uptr getUsableSize(UnpackedHeader *Header) {
-    uptr Size = getAllocator().GetActuallyAllocatedSize(getAllocBeg(Header));
+    uptr Size = getBackendAllocator().GetActuallyAllocatedSize(
+        getAllocBeg(Header));
     if (Size == 0)
-      return Size;
+      return 0;
     return Size - AlignedChunkHeaderSize - (Header->Offset << MinAlignmentLog);
   }
@@ -120,7 +120,8 @@
     return static_cast<u16>(Crc);
   }
 
-  // Checks the validity of a chunk by verifying its checksum.
+  // Checks the validity of a chunk by verifying its checksum. It doesn't
+  // incur termination in the event of an invalid chunk.
   bool isValid() {
     UnpackedHeader NewUnpackedHeader;
     const AtomicPackedHeader *AtomicHeader =
@@ -130,13 +131,27 @@
     return (NewUnpackedHeader.Checksum == computeChecksum(&NewUnpackedHeader));
   }
 
+  // Nulls out a chunk header. When returning the chunk to the backend, there
+  // is no need to store a valid ChunkAvailable header, as this would be
+  // computationally expensive. Zeroing out serves the same purpose by making
+  // the header invalid. In the extremely rare event where 0 would be a valid
+  // checksum for the chunk, the state of the chunk is ChunkAvailable anyway.
+  COMPILER_CHECK(ChunkAvailable == 0);
+  void eraseHeader() {
+    PackedHeader NullPackedHeader = 0;
+    AtomicPackedHeader *AtomicHeader =
+        reinterpret_cast<AtomicPackedHeader *>(this);
+    atomic_store_relaxed(AtomicHeader, NullPackedHeader);
+  }
+
   // Loads and unpacks the header, verifying the checksum in the process.
   void loadHeader(UnpackedHeader *NewUnpackedHeader) const {
     const AtomicPackedHeader *AtomicHeader =
         reinterpret_cast<const AtomicPackedHeader *>(this);
     PackedHeader NewPackedHeader = atomic_load_relaxed(AtomicHeader);
     *NewUnpackedHeader = bit_cast<UnpackedHeader>(NewPackedHeader);
-    if (NewUnpackedHeader->Checksum != computeChecksum(NewUnpackedHeader)) {
+    if (UNLIKELY(NewUnpackedHeader->Checksum !=
+                 computeChecksum(NewUnpackedHeader))) {
       dieWithMessage("ERROR: corrupted chunk header at address %p\n", this);
     }
   }
@@ -160,15 +175,19 @@
     PackedHeader OldPackedHeader = bit_cast<PackedHeader>(*OldUnpackedHeader);
     AtomicPackedHeader *AtomicHeader =
         reinterpret_cast<AtomicPackedHeader *>(this);
-    if (!atomic_compare_exchange_strong(AtomicHeader,
-                                        &OldPackedHeader,
-                                        NewPackedHeader,
-                                        memory_order_relaxed)) {
+    if (UNLIKELY(!atomic_compare_exchange_strong(AtomicHeader,
+                                                 &OldPackedHeader,
+                                                 NewPackedHeader,
+                                                 memory_order_relaxed))) {
       dieWithMessage("ERROR: race on chunk header at address %p\n", this);
     }
   }
 };
 
+ScudoChunk *getScudoChunk(uptr UserBeg) {
+  return reinterpret_cast<ScudoChunk *>(UserBeg - AlignedChunkHeaderSize);
+}
+
 static bool ScudoInitIsRunning = false;
 
 static pthread_once_t GlobalInited = PTHREAD_ONCE_INIT;
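compareExchangeHeader in the hunk above relies on compare-and-swap: the new packed header is only stored if the word still holds the value loaded earlier, so a concurrent mutation of the same chunk header (e.g. a racing double-free) fails the CAS and the allocator aborts rather than silently losing an update. A sketch of the pattern, with illustrative names:

```cpp
// Lock-free header update with race detection via compare_exchange_strong.
#include <atomic>
#include <cstdint>

// Returns false when another thread changed the header under us; the real
// allocator dies with "race on chunk header" in that case.
bool tryUpdateHeader(std::atomic<uint64_t> *Header,
                     uint64_t OldPacked, uint64_t NewPacked) {
  return Header->compare_exchange_strong(OldPacked, NewPacked,
                                         std::memory_order_relaxed);
}
```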
@@ -190,7 +209,7 @@
     return;
   }
   drainQuarantine();
-  getAllocator().DestroyCache(&Cache);
+  getBackendAllocator().DestroyCache(&Cache);
   ThreadTornDown = true;
 }
@@ -223,7 +242,7 @@
 static void NOINLINE initThread() {
   pthread_once(&GlobalInited, initGlobal);
   pthread_setspecific(PThreadKey, reinterpret_cast<void *>(1));
-  getAllocator().InitCache(&Cache);
+  getBackendAllocator().InitCache(&Cache);
   ThreadInited = true;
 }
@@ -235,38 +254,31 @@
   void Recycle(ScudoChunk *Chunk) {
     UnpackedHeader Header;
     Chunk->loadHeader(&Header);
-    if (Header.State != ChunkQuarantine) {
+    if (UNLIKELY(Header.State != ChunkQuarantine)) {
       dieWithMessage("ERROR: invalid chunk state when recycling address %p\n",
                      Chunk);
     }
+    Chunk->eraseHeader();
     void *Ptr = Chunk->getAllocBeg(&Header);
-    getAllocator().Deallocate(Cache_, Ptr);
+    getBackendAllocator().Deallocate(Cache_, Ptr);
   }
 
   /// Internal quarantine allocation and deallocation functions.
   void *Allocate(uptr Size) {
-    // The internal quarantine memory cannot be protected by us. But the only
-    // structures allocated are QuarantineBatch, that are 8KB for x64. So we
-    // will use mmap for those, and given that Deallocate doesn't pass a size
-    // in, we enforce the size of the allocation to be sizeof(QuarantineBatch).
-    // TODO(kostyak): switching to mmap impacts greatly performances, we have
-    //                to find another solution
-    // CHECK_EQ(Size, sizeof(QuarantineBatch));
-    // return MmapOrDie(Size, "QuarantineBatch");
-    return getAllocator().Allocate(Cache_, Size, 1, false);
+    // TODO(kostyak): figure out the best way to protect the batches.
+    return getBackendAllocator().Allocate(Cache_, Size, MinAlignment);
   }
 
   void Deallocate(void *Ptr) {
-    // UnmapOrDie(Ptr, sizeof(QuarantineBatch));
-    getAllocator().Deallocate(Cache_, Ptr);
+    getBackendAllocator().Deallocate(Cache_, Ptr);
  }
 
   AllocatorCache *Cache_;
 };
 
 typedef Quarantine<QuarantineCallback, ScudoChunk> ScudoQuarantine;
-typedef ScudoQuarantine::Cache QuarantineCache;
-static thread_local QuarantineCache ThreadQuarantineCache;
+typedef ScudoQuarantine::Cache ScudoQuarantineCache;
+static thread_local ScudoQuarantineCache ThreadQuarantineCache;
 
 void AllocatorOptions::setFrom(const Flags *f, const CommonFlags *cf) {
   MayReturnNull = cf->allocator_may_return_null;
@@ -288,11 +300,11 @@
   f->ZeroContents = ZeroContents;
 }
 
-struct Allocator {
+struct ScudoAllocator {
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(2UL << 30, 1ULL << 40);
 
-  ScudoAllocator BackendAllocator;
+  ScudoBackendAllocator BackendAllocator;
   ScudoQuarantine AllocatorQuarantine;
 
   // The fallback caches are used when the thread local caches have been
@@ -300,13 +312,13 @@
   // be accessed by different threads.
   StaticSpinMutex FallbackMutex;
   AllocatorCache FallbackAllocatorCache;
-  QuarantineCache FallbackQuarantineCache;
+  ScudoQuarantineCache FallbackQuarantineCache;
 
   bool DeallocationTypeMismatch;
   bool ZeroContents;
   bool DeleteSizeMismatch;
 
-  explicit Allocator(LinkerInitialized)
+  explicit ScudoAllocator(LinkerInitialized)
     : AllocatorQuarantine(LINKER_INITIALIZED),
       FallbackQuarantineCache(LINKER_INITIALIZED) {}
@@ -349,37 +361,37 @@
         static_cast<uptr>(Options.QuarantineSizeMb) << 20,
         static_cast<uptr>(Options.ThreadLocalQuarantineSizeKb) << 10);
     BackendAllocator.InitCache(&FallbackAllocatorCache);
-    Cookie = Prng.Next();
+    Cookie = Prng.getNext();
   }
 
-  // Helper function that checks for a valid Scudo chunk.
+  // Helper function that checks for a valid Scudo chunk. nullptr isn't.
   bool isValidPointer(const void *UserPtr) {
     if (UNLIKELY(!ThreadInited))
       initThread();
-    uptr ChunkBeg = reinterpret_cast<uptr>(UserPtr);
-    if (!IsAligned(ChunkBeg, MinAlignment)) {
+    if (!UserPtr)
       return false;
-    }
-    ScudoChunk *Chunk =
-        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
-    return Chunk->isValid();
+    uptr UserBeg = reinterpret_cast<uptr>(UserPtr);
+    if (!IsAligned(UserBeg, MinAlignment))
+      return false;
+    return getScudoChunk(UserBeg)->isValid();
   }
 
   // Allocates a chunk.
-  void *allocate(uptr Size, uptr Alignment, AllocType Type) {
+  void *allocate(uptr Size, uptr Alignment, AllocType Type,
+                 bool ForceZeroContents = false) {
     if (UNLIKELY(!ThreadInited))
       initThread();
-    if (!IsPowerOfTwo(Alignment)) {
+    if (UNLIKELY(!IsPowerOfTwo(Alignment))) {
       dieWithMessage("ERROR: alignment is not a power of 2\n");
     }
     if (Alignment > MaxAlignment)
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
     if (Alignment < MinAlignment)
       Alignment = MinAlignment;
-    if (Size == 0)
-      Size = 1;
     if (Size >= MaxAllowedMallocSize)
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
+    if (Size == 0)
+      Size = 1;
 
     uptr NeededSize = RoundUpTo(Size, MinAlignment) + AlignedChunkHeaderSize;
     if (Alignment > MinAlignment)
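The tail of the hunk above computes how much to request from the backend: the user size rounded up to MinAlignment, plus the aligned chunk header, plus worst-case padding when the requested alignment exceeds MinAlignment. A small sketch of that computation, with stand-in constants for the patch's MinAlignment and AlignedChunkHeaderSize:

```cpp
// Backend request sizing for an aligned allocation (stand-in constants).
#include <cstdint>

constexpr uintptr_t kMinAlignment = 16;  // stand-in for MinAlignment
constexpr uintptr_t kHeaderSize = 16;    // stand-in for AlignedChunkHeaderSize

// Round Size up to a power-of-two Boundary.
uintptr_t roundUpTo(uintptr_t Size, uintptr_t Boundary) {
  return (Size + Boundary - 1) & ~(Boundary - 1);
}

uintptr_t neededSize(uintptr_t Size, uintptr_t Alignment) {
  uintptr_t Needed = roundUpTo(Size, kMinAlignment) + kHeaderSize;
  if (Alignment > kMinAlignment)
    Needed += Alignment;  // worst-case slack to realign the user pointer
  return Needed;
}
```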
@@ -395,13 +407,13 @@
     bool FromPrimary = PrimaryAllocator::CanAllocate(NeededSize, MinAlignment);
 
     void *Ptr;
+    uptr AllocationAlignment = FromPrimary ? MinAlignment : Alignment;
     if (LIKELY(!ThreadTornDown)) {
-      Ptr = BackendAllocator.Allocate(&Cache, NeededSize,
-                                      FromPrimary ? MinAlignment : Alignment);
+      Ptr = BackendAllocator.Allocate(&Cache, NeededSize, AllocationAlignment);
     } else {
       SpinMutexLock l(&FallbackMutex);
       Ptr = BackendAllocator.Allocate(&FallbackAllocatorCache, NeededSize,
-                                      FromPrimary ? MinAlignment : Alignment);
+                                      AllocationAlignment);
     }
     if (!Ptr)
       return BackendAllocator.ReturnNullOrDieOnOOM();
@@ -419,27 +431,23 @@
     uptr ActuallyAllocatedSize = BackendAllocator.GetActuallyAllocatedSize(
         reinterpret_cast<void *>(AllocBeg));
     // If requested, we will zero out the entire contents of the returned chunk.
-    if (ZeroContents && FromPrimary)
+    if ((ForceZeroContents || ZeroContents) && FromPrimary)
      memset(Ptr, 0, ActuallyAllocatedSize);
 
-    uptr ChunkBeg = AllocBeg + AlignedChunkHeaderSize;
-    if (!IsAligned(ChunkBeg, Alignment))
-      ChunkBeg = RoundUpTo(ChunkBeg, Alignment);
-    CHECK_LE(ChunkBeg + Size, AllocBeg + NeededSize);
-    ScudoChunk *Chunk =
-        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+    uptr UserBeg = AllocBeg + AlignedChunkHeaderSize;
+    if (!IsAligned(UserBeg, Alignment))
+      UserBeg = RoundUpTo(UserBeg, Alignment);
+    CHECK_LE(UserBeg + Size, AllocBeg + NeededSize);
     UnpackedHeader Header = {};
     Header.State = ChunkAllocated;
-    uptr Offset = ChunkBeg - AlignedChunkHeaderSize - AllocBeg;
+    uptr Offset = UserBeg - AlignedChunkHeaderSize - AllocBeg;
     Header.Offset = Offset >> MinAlignmentLog;
     Header.AllocType = Type;
     Header.UnusedBytes = ActuallyAllocatedSize - Offset -
         AlignedChunkHeaderSize - Size;
-    Header.Salt = static_cast<u8>(Prng.Next());
-    Chunk->storeHeader(&Header);
-    void *UserPtr = reinterpret_cast<void *>(ChunkBeg);
-    // TODO(kostyak): hooks sound like a terrible idea security wise but might
-    //                be needed for things to work properly?
+    Header.Salt = static_cast<u8>(Prng.getNext());
+    getScudoChunk(UserBeg)->storeHeader(&Header);
+    void *UserPtr = reinterpret_cast<void *>(UserBeg);
     // if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(UserPtr, Size);
     return UserPtr;
   }
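The hunk above slides the user pointer forward when needed to satisfy alignment and records the distance back to the backend block start in the header's Offset field, in MinAlignment units, so deallocation can recover AllocBeg from the user pointer alone. A sketch of that encoding, reusing hypothetical stand-in constants:

```cpp
// Offset encoding between the user pointer and the backend block start.
#include <cstdint>

constexpr uintptr_t kHeaderSize = 16;      // stand-in, AlignedChunkHeaderSize
constexpr uintptr_t kMinAlignmentLog = 4;  // log2 of a 16-byte MinAlignment

// Distance from AllocBeg to the header, in MinAlignment units; this is the
// quantity the patch stores in Header.Offset.
uint16_t encodeOffset(uintptr_t UserBeg, uintptr_t AllocBeg) {
  return static_cast<uint16_t>((UserBeg - kHeaderSize - AllocBeg) >>
                               kMinAlignmentLog);
}

// Inverse operation used at deallocation time.
uintptr_t recoverAllocBeg(uintptr_t UserBeg, uint16_t Offset) {
  return UserBeg - kHeaderSize -
         (static_cast<uintptr_t>(Offset) << kMinAlignmentLog);
}
```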
-        if (NewHeader.AllocType != FromMemalign || Type != FromMalloc) {
+        if (OldHeader.AllocType != FromMemalign || Type != FromMalloc) {
           dieWithMessage("ERROR: allocation type mismatch on address %p\n",
-                         Chunk);
+                         UserPtr);
         }
       }
     }
+    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
     uptr Size = UsableSize - OldHeader.UnusedBytes;
     if (DeleteSizeMismatch) {
       if (DeleteSize && DeleteSize != Size) {
         dieWithMessage("ERROR: invalid sized delete on chunk at address %p\n",
-                       Chunk);
+                       UserPtr);
       }
     }
+    UnpackedHeader NewHeader = OldHeader;
+    NewHeader.State = ChunkQuarantine;
+    Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
+
     if (LIKELY(!ThreadTornDown)) {
       AllocatorQuarantine.Put(&ThreadQuarantineCache,
                               QuarantineCallback(&Cache),
                               Chunk, UsableSize);
@@ -504,24 +511,27 @@
   void *reallocate(void *OldPtr, uptr NewSize) {
     if (UNLIKELY(!ThreadInited))
       initThread();
-    uptr ChunkBeg = reinterpret_cast<uptr>(OldPtr);
-    ScudoChunk *Chunk =
-        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+    uptr UserBeg = reinterpret_cast<uptr>(OldPtr);
+    if (UNLIKELY(!IsAligned(UserBeg, MinAlignment))) {
+      dieWithMessage("ERROR: attempted to reallocate a chunk not properly "
+                     "aligned at address %p\n", OldPtr);
+    }
+    ScudoChunk *Chunk = getScudoChunk(UserBeg);
     UnpackedHeader OldHeader;
     Chunk->loadHeader(&OldHeader);
-    if (OldHeader.State != ChunkAllocated) {
+    if (UNLIKELY(OldHeader.State != ChunkAllocated)) {
       dieWithMessage("ERROR: invalid chunk state when reallocating address "
                      "%p\n", OldPtr);
     }
-    uptr Size = Chunk->getUsableSize(&OldHeader);
-    if (OldHeader.AllocType != FromMalloc) {
+    if (UNLIKELY(OldHeader.AllocType != FromMalloc)) {
       dieWithMessage("ERROR: invalid chunk type when reallocating address %p\n",
-                     Chunk);
+                     OldPtr);
     }
+    uptr UsableSize = Chunk->getUsableSize(&OldHeader);
     UnpackedHeader NewHeader = OldHeader;
     // The new size still fits in the current chunk.
-    if (NewSize <= Size) {
-      NewHeader.UnusedBytes = Size - NewSize;
+    if (NewSize <= UsableSize) {
+      NewHeader.UnusedBytes = UsableSize - NewSize;
       Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
       return OldPtr;
     }
@@ -529,18 +539,18 @@
     // old one.
     void *NewPtr = allocate(NewSize, MinAlignment, FromMalloc);
     if (NewPtr) {
-      uptr OldSize = Size - OldHeader.UnusedBytes;
+      uptr OldSize = UsableSize - OldHeader.UnusedBytes;
       memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
       NewHeader.State = ChunkQuarantine;
       Chunk->compareExchangeHeader(&NewHeader, &OldHeader);
       if (LIKELY(!ThreadTornDown)) {
         AllocatorQuarantine.Put(&ThreadQuarantineCache,
-                                QuarantineCallback(&Cache), Chunk, Size);
+                                QuarantineCallback(&Cache), Chunk, UsableSize);
       } else {
         SpinMutexLock l(&FallbackMutex);
         AllocatorQuarantine.Put(&FallbackQuarantineCache,
                                 QuarantineCallback(&FallbackAllocatorCache),
-                                Chunk, Size);
+                                Chunk, UsableSize);
       }
     }
     return NewPtr;
   }
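reallocate() above now distinguishes an in-place resize, where the new size still fits in the chunk's usable size and only the UnusedBytes field changes before the header is republished, from the allocate-copy-quarantine path. A standalone sketch of the in-place case, with illustrative names:

```cpp
// In-place reallocation: adjust accounting only, never move the data.
#include <cstdint>

struct DemoSizes {
  uintptr_t UsableSize;   // backend size minus header and offset
  uintptr_t UnusedBytes;  // usable bytes past the requested size
};

// Returns true if the request was satisfied without moving memory.
bool shrinkOrGrowInPlace(DemoSizes *S, uintptr_t NewSize) {
  if (NewSize > S->UsableSize)
    return false;  // must go through allocate/copy/quarantine instead
  S->UnusedBytes = S->UsableSize - NewSize;
  return true;
}
```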
@@ -552,13 +562,12 @@
       initThread();
     if (!Ptr)
       return 0;
-    uptr ChunkBeg = reinterpret_cast<uptr>(Ptr);
-    ScudoChunk *Chunk =
-        reinterpret_cast<ScudoChunk *>(ChunkBeg - AlignedChunkHeaderSize);
+    uptr UserBeg = reinterpret_cast<uptr>(Ptr);
+    ScudoChunk *Chunk = getScudoChunk(UserBeg);
     UnpackedHeader Header;
     Chunk->loadHeader(&Header);
     // Getting the usable size of a chunk only makes sense if it's allocated.
-    if (Header.State != ChunkAllocated) {
+    if (UNLIKELY(Header.State != ChunkAllocated)) {
       dieWithMessage("ERROR: invalid chunk state when sizing address %p\n",
                      Ptr);
     }
@@ -569,13 +578,9 @@
     if (UNLIKELY(!ThreadInited))
       initThread();
     uptr Total = NMemB * Size;
-    if (Size != 0 && Total / Size != NMemB) // Overflow check
+    if (Size != 0 && Total / Size != NMemB)  // Overflow check
       return BackendAllocator.ReturnNullOrDieOnBadRequest();
-    void *Ptr = allocate(Total, MinAlignment, FromMalloc);
-    // If ZeroContents, the content of the chunk has already been zero'd out.
-    if (!ZeroContents && Ptr && BackendAllocator.FromPrimary(Ptr))
-      memset(Ptr, 0, getUsableSize(Ptr));
-    return Ptr;
+    return allocate(Total, MinAlignment, FromMalloc, true);
   }
 
   void drainQuarantine() {
@@ -592,9 +597,9 @@
   }
 };
 
-static Allocator Instance(LINKER_INITIALIZED);
+static ScudoAllocator Instance(LINKER_INITIALIZED);
 
-static ScudoAllocator &getAllocator() {
+static ScudoBackendAllocator &getBackendAllocator() {
   return Instance.BackendAllocator;
 }
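The calloc hunk above keeps the classic multiply-overflow check, dividing the product back and comparing, and now delegates zeroing to allocate() through the new ForceZeroContents parameter instead of memset'ing after the fact. The check in isolation:

```cpp
// Overflow-safe computation of NMemB * Size for a calloc-style API.
#include <cstdint>

// Writes the product to *Total and reports whether it wrapped around.
bool callocSizeOverflows(uintptr_t NMemB, uintptr_t Size, uintptr_t *Total) {
  *Total = NMemB * Size;
  return Size != 0 && *Total / Size != NMemB;
}
```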
Index: lib/scudo/scudo_allocator_secondary.h
===================================================================
--- lib/scudo/scudo_allocator_secondary.h
+++ lib/scudo/scudo_allocator_secondary.h
@@ -88,8 +88,11 @@
     // The primary adds the whole class size to the stats when allocating a
     // chunk, so we will do something similar here. But we will not account for
     // the guard pages.
-    Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
-    Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
+    {
+      SpinMutexLock l(&StatsMutex);
+      Stats->Add(AllocatorStatAllocated, MapSize - 2 * PageSize);
+      Stats->Add(AllocatorStatMapped, MapSize - 2 * PageSize);
+    }
 
     return reinterpret_cast<void *>(UserBeg);
   }
@@ -112,8 +115,11 @@
   void Deallocate(AllocatorStats *Stats, void *Ptr) {
     SecondaryHeader *Header = getHeader(Ptr);
-    Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
-    Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+    {
+      SpinMutexLock l(&StatsMutex);
+      Stats->Sub(AllocatorStatAllocated, Header->MapSize - 2 * PageSize);
+      Stats->Sub(AllocatorStatMapped, Header->MapSize - 2 * PageSize);
+    }
     UnmapOrDie(reinterpret_cast<void *>(Header->MapBeg), Header->MapSize);
   }
@@ -127,7 +133,7 @@
   uptr GetActuallyAllocatedSize(void *Ptr) {
     SecondaryHeader *Header = getHeader(Ptr);
-    // Deduct PageSize as MapEnd includes the trailing guard page.
+    // Deduct PageSize as MapSize includes the trailing guard page.
     uptr MapEnd = Header->MapBeg + Header->MapSize - PageSize;
     return MapEnd - reinterpret_cast<uptr>(Ptr);
   }
@@ -182,6 +188,7 @@
   const uptr SecondaryHeaderSize = sizeof(SecondaryHeader);
   const uptr HeadersSize = SecondaryHeaderSize + AlignedChunkHeaderSize;
   uptr PageSize;
+  SpinMutex StatsMutex;
   atomic_uint8_t MayReturnNull;
 };
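The secondary allocator changes above wrap the paired stats updates in a critical section guarded by the newly introduced StatsMutex, so concurrent Allocate/Deallocate calls can no longer interleave their Add/Sub pairs. A sketch of the pattern, using std::mutex as a stand-in for sanitizer_common's SpinMutex/SpinMutexLock:

```cpp
// Paired counter updates made atomic as a unit via a scoped lock.
#include <cstdint>
#include <mutex>

struct DemoStats {
  uint64_t Allocated = 0;
  uint64_t Mapped = 0;
};

std::mutex StatsMutex;  // stand-in for the patch's SpinMutex StatsMutex

void addStats(DemoStats *Stats, uint64_t Bytes) {
  std::lock_guard<std::mutex> L(StatsMutex);  // scoped, like SpinMutexLock
  Stats->Allocated += Bytes;
  Stats->Mapped += Bytes;
}
```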
Index: lib/scudo/scudo_utils.h
===================================================================
--- lib/scudo/scudo_utils.h
+++ lib/scudo/scudo_utils.h
@@ -41,7 +41,7 @@
 struct Xorshift128Plus {
  public:
   Xorshift128Plus();
-  u64 Next() {
+  u64 getNext() {
     u64 x = State[0];
     const u64 y = State[1];
     State[0] = y;
@@ -58,7 +58,59 @@
   CRC32Hardware = 1,
 };
 
-u32 computeSoftwareCRC32(u32 Crc, uptr Data);
+const static u32 CRC32Table[] = {
+    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+    0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+    0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+    0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+    0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+    0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+    0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+    0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+    0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+    0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+    0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+    0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+    0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+    0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+    0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+    0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+    0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+    0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+    0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+    0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+    0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+    0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+    0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+    0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+    0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+    0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+    0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+    0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+    0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+    0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+    0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+    0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
+};
+
+INLINE u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
+  for (uptr i = 0; i < sizeof(Data); i++) {
+    Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
+    Data >>= 8;
+  }
+  return Crc;
+}
 
 }  // namespace __scudo
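The table moved into scudo_utils.h above is the standard byte-wise lookup table for CRC-32 with the reflected polynomial 0xEDB88320, so every entry can be regenerated, and thus sanity-checked, with the classic bitwise loop:

```cpp
// Regenerate the 256-entry reflected CRC-32 table and compare against the
// literals above (e.g. Table[1] must equal 0x77073096).
#include <cstdint>

void buildCRC32Table(uint32_t Table[256]) {
  for (uint32_t I = 0; I < 256; I++) {
    uint32_t C = I;
    for (int J = 0; J < 8; J++)
      C = (C & 1) ? 0xEDB88320u ^ (C >> 1) : C >> 1;
    Table[I] = C;
  }
}
```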
Index: lib/scudo/scudo_utils.cpp
===================================================================
--- lib/scudo/scudo_utils.cpp
+++ lib/scudo/scudo_utils.cpp
@@ -159,58 +159,4 @@
   fillRandom(reinterpret_cast<u8 *>(State), sizeof(State));
 }
 
-const static u32 CRC32Table[] = {
-    0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
-    0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
-    0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
-    0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
-    0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
-    0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
-    0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
-    0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
-    0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
-    0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
-    0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
-    0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
-    0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
-    0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
-    0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
-    0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
-    0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
-    0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
-    0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
-    0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
-    0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
-    0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
-    0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
-    0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
-    0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
-    0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
-    0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
-    0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
-    0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
-    0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
-    0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
-    0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
-    0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
-    0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
-    0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
-    0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
-    0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
-    0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
-    0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
-    0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
-    0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
-    0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
-    0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d
-};
-
-u32 computeSoftwareCRC32(u32 Crc, uptr Data) {
-  for (uptr i = 0; i < sizeof(Data); i++) {
-    Crc = CRC32Table[(Crc ^ Data) & 0xff] ^ (Crc >> 8);
-    Data >>= 8;
-  }
-  return Crc;
-}
-
 }  // namespace __scudo