Index: compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt
+++ compiler-rt/trunk/lib/scudo/standalone/CMakeLists.txt
@@ -5,6 +5,7 @@
 set(SCUDO_CFLAGS)
 
 list(APPEND SCUDO_CFLAGS
+  -Werror=conversion
   -Wall
   -nostdinc++)
Index: compiler-rt/trunk/lib/scudo/standalone/chunk.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/chunk.h
+++ compiler-rt/trunk/lib/scudo/standalone/chunk.h
@@ -29,15 +29,15 @@
   u32 Crc = static_cast<u32>(CRC32_INTRINSIC(Seed, Value));
   for (uptr I = 0; I < ArraySize; I++)
     Crc = static_cast<u32>(CRC32_INTRINSIC(Crc, Array[I]));
-  return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+  return static_cast<u16>(Crc ^ (Crc >> 16));
 #else
   if (HashAlgorithm == Checksum::HardwareCRC32) {
     u32 Crc = computeHardwareCRC32(Seed, Value);
     for (uptr I = 0; I < ArraySize; I++)
       Crc = computeHardwareCRC32(Crc, Array[I]);
-    return static_cast<u16>((Crc & 0xffff) ^ (Crc >> 16));
+    return static_cast<u16>(Crc ^ (Crc >> 16));
   } else {
-    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed & 0xffff), Value);
+    u16 Checksum = computeBSDChecksum(static_cast<u16>(Seed), Value);
     for (uptr I = 0; I < ArraySize; I++)
       Checksum = computeBSDChecksum(Checksum, Array[I]);
     return Checksum;
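Annotation (not part of the patch): the checksum hunk drops the explicit low-half mask because the final cast to u16 already truncates to 16 bits, so (Crc & 0xffff) ^ (Crc >> 16) and Crc ^ (Crc >> 16) agree in every bit that survives the cast. A quick standalone check:

#include <cassert>
#include <cstdint>
#include <initializer_list>

int main() {
  for (uint32_t Crc : {0u, 1u, 0xffffu, 0xdeadbeefu, 0xffffffffu}) {
    const uint16_t Old = static_cast<uint16_t>((Crc & 0xffff) ^ (Crc >> 16));
    const uint16_t New = static_cast<uint16_t>(Crc ^ (Crc >> 16));
    assert(Old == New); // the upper bits differ before the cast, but are discarded
  }
  return 0;
}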
@@ -63,24 +63,24 @@
 typedef u64 PackedHeader;
 // Update the 'Mask' constants to reflect changes in this structure.
 struct UnpackedHeader {
-  u64 Checksum : 16;
-  u64 ClassId : 8;
-  u64 SizeOrUnusedBytes : 20;
+  uptr ClassId : 8;
   u8 State : 2;
   u8 Origin : 2;
-  u64 Offset : 16;
+  uptr SizeOrUnusedBytes : 20;
+  uptr Offset : 16;
+  uptr Checksum : 16;
 };
 typedef atomic_u64 AtomicPackedHeader;
 COMPILER_CHECK(sizeof(UnpackedHeader) == sizeof(PackedHeader));
 
 // Those constants are required to silence some -Werror=conversion errors when
 // assigning values to the related bitfield variables.
-constexpr uptr ChecksumMask = (1UL << 16) - 1;
 constexpr uptr ClassIdMask = (1UL << 8) - 1;
+constexpr u8 StateMask = (1U << 2) - 1;
+constexpr u8 OriginMask = (1U << 2) - 1;
 constexpr uptr SizeOrUnusedBytesMask = (1UL << 20) - 1;
-constexpr uptr StateMask = (1UL << 2) - 1;
-constexpr uptr OriginMask = (1UL << 2) - 1;
 constexpr uptr OffsetMask = (1UL << 16) - 1;
+constexpr uptr ChecksumMask = (1UL << 16) - 1;
 
 constexpr uptr getHeaderSize() {
   return roundUpTo(sizeof(PackedHeader), 1U << SCUDO_MIN_ALIGNMENT_LOG);
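Annotation (not part of the patch): the hunk above reorders the header fields (ClassId first, Checksum last) and narrows the State/Origin mask constants, but the struct must still pack into the 64-bit PackedHeader, which the existing COMPILER_CHECK enforces. A standalone mock of the new layout, with local type aliases instead of the scudo ones and assuming the usual Clang/GCC bit-field packing, shows the widths still sum to 64 bits:

#include <cstdint>

using u8 = uint8_t;
using u64 = uint64_t;
using uptr = uintptr_t;

// Mock of the patched UnpackedHeader: same field names and widths, local types.
struct UnpackedHeader {
  uptr ClassId : 8;
  u8 State : 2;
  u8 Origin : 2;
  uptr SizeOrUnusedBytes : 20;
  uptr Offset : 16;
  uptr Checksum : 16;
};
// 8 + 2 + 2 + 20 + 16 + 16 == 64 bits, so the header still fits a u64.
static_assert(sizeof(UnpackedHeader) == sizeof(u64),
              "header must pack into a single 64-bit word");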
Index: compiler-rt/trunk/lib/scudo/standalone/combined.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/combined.h
+++ compiler-rt/trunk/lib/scudo/standalone/combined.h
@@ -46,8 +46,8 @@
     Chunk::compareExchangeHeader(Allocator.Cookie, Ptr, &NewHeader, &Header);
     void *BlockBegin = Allocator::getBlockBegin(Ptr, &NewHeader);
-    const uptr ClassId = Header.ClassId;
-    if (ClassId)
+    const uptr ClassId = NewHeader.ClassId;
+    if (LIKELY(ClassId))
       Cache.deallocate(ClassId, BlockBegin);
     else
       Allocator.Secondary.deallocate(BlockBegin);
@@ -123,14 +123,16 @@
     Options.ZeroContents = getFlags()->zero_contents;
     Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
     Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
-    Options.QuarantineMaxChunkSize = getFlags()->quarantine_max_chunk_size;
+    Options.QuarantineMaxChunkSize =
+        static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
     Stats.initLinkerInitialized();
     Primary.initLinkerInitialized(getFlags()->release_to_os_interval_ms);
     Secondary.initLinkerInitialized(&Stats);
 
-    Quarantine.init(getFlags()->quarantine_size_kb << 10,
-                    getFlags()->thread_local_quarantine_size_kb << 10);
+    Quarantine.init(
+        static_cast<uptr>(getFlags()->quarantine_size_kb << 10),
+        static_cast<uptr>(getFlags()->thread_local_quarantine_size_kb << 10));
   }
 
   void reset() { memset(this, 0, sizeof(*this)); }
@@ -165,16 +167,17 @@
         return nullptr;
       reportAlignmentTooBig(Alignment, MaxAlignment);
     }
-    if (UNLIKELY(Alignment < MinAlignment))
+    if (Alignment < MinAlignment)
       Alignment = MinAlignment;
 
     // If the requested size happens to be 0 (more common than you might think),
-    // allocate 1 byte on top of the header. Then add the extra bytes required
-    // to fulfill the alignment requirements: we allocate enough to be sure that
-    // there will be an address in the block that will satisfy the alignment.
+    // allocate MinAlignment bytes on top of the header. Then add the extra
+    // bytes required to fulfill the alignment requirements: we allocate enough
+    // to be sure that there will be an address in the block that will satisfy
+    // the alignment.
     const uptr NeededSize =
-        Chunk::getHeaderSize() + roundUpTo(Size ? Size : 1, MinAlignment) +
-        ((Alignment > MinAlignment) ? (Alignment - Chunk::getHeaderSize()) : 0);
+        roundUpTo(Size, MinAlignment) +
+        ((Alignment > MinAlignment) ? Alignment : Chunk::getHeaderSize());
 
     // Takes care of extravagantly large sizes as well as integer overflows.
     if (UNLIKELY(Size >= MaxAllowedMallocSize ||
@@ -186,9 +189,10 @@
     void *Block;
     uptr ClassId;
-    uptr BlockEnd = 0;
-    if (PrimaryT::canAllocate(NeededSize)) {
+    uptr BlockEnd;
+    if (LIKELY(PrimaryT::canAllocate(NeededSize))) {
       ClassId = SizeClassMap::getClassIdBySize(NeededSize);
+      DCHECK_NE(ClassId, 0U);
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
       Block = TSD->Cache.allocate(ClassId);
@@ -205,17 +209,17 @@
       reportOutOfMemory(NeededSize);
     }
 
-    // We only need to zero the contents for Primary backed allocations.
-    if ((ZeroContents || Options.ZeroContents) && ClassId)
+    // We only need to zero the contents for Primary backed allocations. This
+    // condition is not necessarily unlikely, but since memset is costly, we
+    // might as well mark it as such.
+    if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
       memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
 
     Chunk::UnpackedHeader Header = {};
     uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
-    // The following condition isn't necessarily "UNLIKELY".
-    if (!isAligned(UserPtr, Alignment)) {
+    if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
       const uptr AlignedUserPtr = roundUpTo(UserPtr, Alignment);
       const uptr Offset = AlignedUserPtr - UserPtr;
-      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
       DCHECK_GT(Offset, 2 * sizeof(u32));
       // The BlockMarker has no security purpose, but is specifically meant for
       // the chunk iteration function that can be used in debugging situations.
@@ -224,16 +228,13 @@
       reinterpret_cast<u32 *>(Block)[0] = BlockMarker;
       reinterpret_cast<u32 *>(Block)[1] = static_cast<u32>(Offset);
       UserPtr = AlignedUserPtr;
+      Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
     }
+    Header.ClassId = ClassId & Chunk::ClassIdMask;
     Header.State = Chunk::State::Allocated;
     Header.Origin = Origin & Chunk::OriginMask;
-    if (ClassId) {
-      Header.ClassId = ClassId & Chunk::ClassIdMask;
-      Header.SizeOrUnusedBytes = Size & Chunk::SizeOrUnusedBytesMask;
-    } else {
-      Header.SizeOrUnusedBytes =
-          (BlockEnd - (UserPtr + Size)) & Chunk::SizeOrUnusedBytesMask;
-    }
+    Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
+                               Chunk::SizeOrUnusedBytesMask;
     void *Ptr = reinterpret_cast<void *>(UserPtr);
     Chunk::storeHeader(Cookie, Ptr, &Header);
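Annotation (not part of the patch): the new NeededSize expression in the allocate() hunks above reserves roundUpTo(Size, MinAlignment) plus either the header size or, for over-aligned requests, a full Alignment's worth of slack. A small brute-force check, with assumed constants HeaderSize == MinAlignment == 16 (plausible 64-bit values, not taken from the patch), confirms that a block of that size always contains a suitably aligned user pointer with Size usable bytes after a header:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

constexpr uptr roundUpTo(uptr X, uptr Boundary) {
  return (X + Boundary - 1) & ~(Boundary - 1);
}

int main() {
  constexpr uptr HeaderSize = 16, MinAlignment = 16; // assumed values
  for (uptr Alignment = MinAlignment; Alignment <= 4096; Alignment *= 2) {
    for (uptr Size = 0; Size <= 512; Size++) {
      const uptr NeededSize =
          roundUpTo(Size, MinAlignment) +
          ((Alignment > MinAlignment) ? Alignment : HeaderSize);
      // Backend blocks are at least MinAlignment aligned; try many origins.
      for (uptr BlockBegin = 0; BlockBegin < 16 * 1024;
           BlockBegin += MinAlignment) {
        const uptr UserPtr = roundUpTo(BlockBegin + HeaderSize, Alignment);
        assert(UserPtr + Size <= BlockBegin + NeededSize);
      }
    }
  }
  return 0;
}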
@@ -313,7 +314,7 @@
     const uptr OldSize = getSize(OldPtr, &OldHeader);
     // If the new size is identical to the old one, or lower but within an
     // acceptable range, we just keep the old chunk, and update its header.
-    if (NewSize == OldSize)
+    if (UNLIKELY(NewSize == OldSize))
       return OldPtr;
     if (NewSize < OldSize) {
       const uptr Delta = OldSize - NewSize;
@@ -471,8 +472,7 @@
     // last and last class sizes, as well as the dynamic base for the Primary.
     // The following is an over-approximation that works for our needs.
     const uptr MaxSizeOrUnusedBytes = SizeClassMap::MaxSize - 1;
-    Header.SizeOrUnusedBytes =
-        MaxSizeOrUnusedBytes & Chunk::SizeOrUnusedBytesMask;
+    Header.SizeOrUnusedBytes = MaxSizeOrUnusedBytes;
     if (UNLIKELY(Header.SizeOrUnusedBytes != MaxSizeOrUnusedBytes))
       reportSanityCheckError("size (or unused bytes)");
@@ -484,15 +484,15 @@
   static INLINE void *getBlockBegin(const void *Ptr,
                                     Chunk::UnpackedHeader *Header) {
-    return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) -
-                                    Chunk::getHeaderSize() -
-                                    (Header->Offset << MinAlignmentLog));
+    return reinterpret_cast<void *>(
+        reinterpret_cast<uptr>(Ptr) - Chunk::getHeaderSize() -
+        (static_cast<uptr>(Header->Offset) << MinAlignmentLog));
   }
 
   // Return the size of a chunk as requested during its allocation.
   INLINE uptr getSize(const void *Ptr, Chunk::UnpackedHeader *Header) {
     const uptr SizeOrUnusedBytes = Header->SizeOrUnusedBytes;
-    if (Header->ClassId)
+    if (LIKELY(Header->ClassId))
       return SizeOrUnusedBytes;
     return SecondaryT::getBlockEnd(getBlockBegin(Ptr, Header)) -
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
@@ -514,7 +514,7 @@
     Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
     void *BlockBegin = getBlockBegin(Ptr, &NewHeader);
     const uptr ClassId = NewHeader.ClassId;
-    if (ClassId) {
+    if (LIKELY(ClassId)) {
       bool UnlockRequired;
       auto *TSD = TSDRegistry.getTSDAndLock(&UnlockRequired);
       TSD->Cache.deallocate(ClassId, BlockBegin);
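Annotation (not part of the patch): for Primary-backed chunks the header stores the requested size directly, while for Secondary-backed chunks (ClassId == 0) it stores the unused bytes at the end of the mapping, and getSize() reconstructs the request from the block end. A toy example with made-up addresses:

#include <cassert>
#include <cstdint>

using uptr = uintptr_t;

int main() {
  // Hypothetical secondary-backed chunk; the values are illustrative only.
  const uptr BlockEnd = 0x20000;    // end of the mapped secondary block
  const uptr UserPtr = 0x10040;     // pointer handed back to the user
  const uptr RequestedSize = 0xff0; // what the user asked for
  // What allocate() stores in SizeOrUnusedBytes when ClassId == 0:
  const uptr UnusedBytes = BlockEnd - (UserPtr + RequestedSize);
  // What getSize() computes on the way back:
  assert(BlockEnd - UserPtr - UnusedBytes == RequestedSize);
  return 0;
}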
Index: compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
+++ compiler-rt/trunk/lib/scudo/standalone/fuchsia.cc
@@ -40,7 +40,7 @@
       _zx_vmar_root_self(),
       ZX_VM_CAN_MAP_READ | ZX_VM_CAN_MAP_WRITE | ZX_VM_CAN_MAP_SPECIFIC, 0,
       Size, &Data->Vmar, &Data->VmarBase);
-  if (Status != ZX_OK) {
+  if (UNLIKELY(Status != ZX_OK)) {
    if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
@@ -78,7 +78,7 @@
   } else {
     // Otherwise, create a Vmo and set its name.
     Status = _zx_vmo_create(Size, ZX_VMO_RESIZABLE, &Vmo);
-    if (Status != ZX_OK) {
+    if (UNLIKELY(Status != ZX_OK)) {
       if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
         dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
       return nullptr;
@@ -102,7 +102,7 @@
   } else {
     CHECK_EQ(_zx_handle_close(Vmo), ZX_OK);
   }
-  if (Status != ZX_OK) {
+  if (UNLIKELY(Status != ZX_OK)) {
     if (Status != ZX_ERR_NO_MEMORY || !AllowNoMem)
       dieOnMapUnmapError(Status == ZX_ERR_NO_MEMORY);
     return nullptr;
@@ -125,7 +125,7 @@
     const zx_handle_t Vmar = Data ? Data->Vmar : _zx_vmar_root_self();
     const zx_status_t Status =
        _zx_vmar_unmap(Vmar, reinterpret_cast<uintptr_t>(Addr), Size);
-    if (Status != ZX_OK)
+    if (UNLIKELY(Status != ZX_OK))
       dieOnMapUnmapError();
   }
   if (Data) {
@@ -172,7 +172,7 @@
 bool getRandom(void *Buffer, uptr Length, bool Blocking) {
   COMPILER_CHECK(MaxRandomLength <= ZX_CPRNG_DRAW_MAX_LEN);
-  if (!Buffer || !Length || Length > MaxRandomLength)
+  if (UNLIKELY(!Buffer || !Length || Length > MaxRandomLength))
     return false;
   _zx_cprng_draw(Buffer, Length);
   return true;
Index: compiler-rt/trunk/lib/scudo/standalone/local_cache.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/local_cache.h
+++ compiler-rt/trunk/lib/scudo/standalone/local_cache.h
@@ -22,9 +22,8 @@
     static const u32 MaxNumCached = SizeClassMap::MaxNumCachedHint;
     void setFromArray(void **Array, u32 N) {
       DCHECK_LE(N, MaxNumCached);
-      for (u32 I = 0; I < N; I++)
-        Batch[I] = Array[I];
       Count = N;
+      memcpy(Batch, Array, sizeof(void *) * Count);
     }
     void clear() { Count = 0; }
     void add(void *P) {
@@ -32,8 +31,7 @@
       Batch[Count++] = P;
     }
     void copyToArray(void **Array) const {
-      for (u32 I = 0; I < Count; I++)
-        Array[I] = Batch[I];
+      memcpy(Array, Batch, sizeof(void *) * Count);
     }
     u32 getCount() const { return Count; }
     void *get(u32 I) const {
@@ -52,7 +50,7 @@
   void initLinkerInitialized(GlobalStats *S, SizeClassAllocator *A) {
     Stats.initLinkerInitialized();
-    if (S)
+    if (LIKELY(S))
       S->link(&Stats);
     Allocator = A;
   }
@@ -64,12 +62,12 @@
   void destroy(GlobalStats *S) {
     drain();
-    if (S)
+    if (LIKELY(S))
       S->unlink(&Stats);
   }
 
   void *allocate(uptr ClassId) {
-    CHECK_LT(ClassId, NumClasses);
+    DCHECK_LT(ClassId, NumClasses);
     PerClass *C = &PerClassArray[ClassId];
     if (C->Count == 0) {
       if (UNLIKELY(!refill(C, ClassId)))
@@ -157,8 +155,8 @@
     if (UNLIKELY(!B))
       return false;
     DCHECK_GT(B->getCount(), 0);
-    B->copyToArray(C->Chunks);
     C->Count = B->getCount();
+    B->copyToArray(C->Chunks);
     destroyBatch(ClassId, B);
     return true;
   }
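Annotation (not part of the patch): the local_cache.h hunks replace the element-wise copy loops in TransferBatch with memcpy over Count pointers, assigning Count before copying so the length expression reads the freshly set field. A minimal sketch of the same idiom (not the scudo class itself; names are placeholders):

#include <cstring>

template <unsigned MaxNumCached> struct PointerBatch {
  void setFromArray(void **Array, unsigned N) {
    Count = N;                                    // set the size first ...
    memcpy(Batch, Array, sizeof(void *) * Count); // ... then bulk-copy
  }
  void copyToArray(void **Array) const {
    memcpy(Array, Batch, sizeof(void *) * Count);
  }
  unsigned Count = 0;
  void *Batch[MaxNumCached];
};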
Index: compiler-rt/trunk/lib/scudo/standalone/mutex.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/mutex.h
+++ compiler-rt/trunk/lib/scudo/standalone/mutex.h
@@ -25,12 +25,12 @@
   void init() { memset(this, 0, sizeof(*this)); }
   bool tryLock();
   NOINLINE void lock() {
-    if (tryLock())
+    if (LIKELY(tryLock()))
       return;
-    // The compiler may try to fully unroll the loop, ending up in a
-    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
-    // is large, ugly and unneeded, a compact loop is better for our purpose
-    // here. Use a pragma to tell the compiler not to unroll the loop.
+    // The compiler may try to fully unroll the loop, ending up in a
+    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
+    // is large, ugly and unneeded, a compact loop is better for our purpose
+    // here. Use a pragma to tell the compiler not to unroll the loop.
 #ifdef __clang__
 #pragma nounroll
 #endif
@@ -44,8 +44,8 @@
   void unlock();
 
 private:
-  static constexpr u8 NumberOfTries = 10U;
-  static constexpr u8 NumberOfYields = 10U;
+  static constexpr u8 NumberOfTries = 8U;
+  static constexpr u8 NumberOfYields = 8U;
 
 #if SCUDO_LINUX
   atomic_u32 M;
Index: compiler-rt/trunk/lib/scudo/standalone/primary32.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary32.h
+++ compiler-rt/trunk/lib/scudo/standalone/primary32.h
@@ -74,7 +74,7 @@
       // See comment in the 64-bit primary about releasing smaller size classes.
       Sci->CanRelease = (ReleaseToOsInterval > 0) &&
                         (I != SizeClassMap::BatchClassId) &&
-                        (getSizeByClassId(I) >= (PageSize / 32));
+                        (getSizeByClassId(I) >= (PageSize / 16));
     }
     ReleaseToOsIntervalMs = ReleaseToOsInterval;
   }
@@ -99,9 +99,9 @@
     SizeClassInfo *Sci = getSizeClassInfo(ClassId);
     ScopedLock L(Sci->Mutex);
     TransferBatch *B = Sci->FreeList.front();
-    if (B)
+    if (B) {
       Sci->FreeList.pop_front();
-    else {
+    } else {
       B = populateFreeList(C, ClassId, Sci);
       if (UNLIKELY(!B))
         return nullptr;
@@ -129,7 +129,7 @@
   void enable() {
     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
-      getSizeClassInfo(I)->Mutex.unlock();
+      getSizeClassInfo(static_cast<uptr>(I))->Mutex.unlock();
   }
 
   template <typename F> void iterateOverBlocks(F Callback) {
@@ -356,7 +356,8 @@
     const s32 IntervalMs = ReleaseToOsIntervalMs;
     if (IntervalMs < 0)
       return;
-    if (Sci->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+    if (Sci->ReleaseInfo.LastReleaseAtNs +
+            static_cast<uptr>(IntervalMs) * 1000000ULL >
         getMonotonicTime()) {
       return; // Memory was returned recently.
     }
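Annotation (not part of the patch): the release-interval hunks in both primaries widen IntervalMs before the multiplication. IntervalMs is a signed 32-bit value and 1000000ULL is an unsigned 64-bit constant, so the implicit signed-to-unsigned conversion in the product is the kind of thing -Werror=conversion (via -Wsign-conversion) flags; an explicit cast makes the widening intentional. Roughly:

#include <cstdint>

// Converts a millisecond interval to nanoseconds. The explicit cast mirrors
// the patch: without it, mixing a signed 32-bit operand with an unsigned
// 64-bit constant draws a conversion warning under -Werror=conversion.
uint64_t intervalToNs(int32_t IntervalMs) {
  return static_cast<uint64_t>(IntervalMs) * 1000000ULL;
}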
Index: compiler-rt/trunk/lib/scudo/standalone/primary64.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/primary64.h
+++ compiler-rt/trunk/lib/scudo/standalone/primary64.h
@@ -81,7 +81,7 @@
       // TODO(kostyak): make the lower limit a runtime option
       Region->CanRelease = (ReleaseToOsInterval > 0) &&
                            (I != SizeClassMap::BatchClassId) &&
-                           (getSizeByClassId(I) >= (PageSize / 32));
+                           (getSizeByClassId(I) >= (PageSize / 16));
       Region->RandState = getRandomU32(&Seed);
     }
     ReleaseToOsIntervalMs = ReleaseToOsInterval;
@@ -102,9 +102,9 @@
     RegionInfo *Region = getRegionInfo(ClassId);
     ScopedLock L(Region->Mutex);
     TransferBatch *B = Region->FreeList.front();
-    if (B)
+    if (B) {
       Region->FreeList.pop_front();
-    else {
+    } else {
       B = populateFreeList(C, ClassId, Region);
       if (UNLIKELY(!B))
         return nullptr;
@@ -131,7 +131,7 @@
   void enable() {
     for (sptr I = static_cast<sptr>(NumClasses) - 1; I >= 0; I--)
-      getRegionInfo(I)->Mutex.unlock();
+      getRegionInfo(static_cast<uptr>(I))->Mutex.unlock();
   }
 
   template <typename F> void iterateOverBlocks(F Callback) const {
@@ -181,7 +181,7 @@
   static const uptr PrimarySize = RegionSize * NumClasses;
 
   // Call map for user memory with at least this size.
-  static const uptr MapSizeIncrement = 1UL << 16;
+  static const uptr MapSizeIncrement = 1UL << 17;
 
   struct RegionStats {
     uptr PoppedBlocks;
@@ -272,7 +272,7 @@
       }
       return nullptr;
     }
-    if (MappedUser == 0)
+    if (UNLIKELY(MappedUser == 0))
       Region->Data = Data;
     if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                       UserMapSize, "scudo:primary",
@@ -307,7 +307,7 @@
       return nullptr;
     }
     DCHECK(B);
-    CHECK_GT(B->getCount(), 0);
+    DCHECK_GT(B->getCount(), 0);
     Region->AllocatedUser += AllocatedUser;
     Region->Exhausted = false;
@@ -355,7 +355,8 @@
     const s32 IntervalMs = ReleaseToOsIntervalMs;
     if (IntervalMs < 0)
       return;
-    if (Region->ReleaseInfo.LastReleaseAtNs + IntervalMs * 1000000ULL >
+    if (Region->ReleaseInfo.LastReleaseAtNs +
+            static_cast<uptr>(IntervalMs) * 1000000ULL >
         getMonotonicTime()) {
       return; // Memory was returned recently.
    }
Index: compiler-rt/trunk/lib/scudo/standalone/secondary.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.h
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.h
@@ -50,7 +50,7 @@
 public:
   void initLinkerInitialized(GlobalStats *S) {
     Stats.initLinkerInitialized();
-    if (S)
+    if (LIKELY(S))
       S->link(&Stats);
   }
   void init(GlobalStats *S) {
Index: compiler-rt/trunk/lib/scudo/standalone/secondary.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/secondary.cc
+++ compiler-rt/trunk/lib/scudo/standalone/secondary.cc
@@ -32,14 +32,14 @@
   uptr MapBase = reinterpret_cast<uptr>(
       map(nullptr, MapSize, "scudo:secondary", MAP_NOACCESS | MAP_ALLOWNOMEM,
          &Data));
-  if (!MapBase)
+  if (UNLIKELY(!MapBase))
     return nullptr;
   uptr CommitBase = MapBase + PageSize;
   uptr MapEnd = MapBase + MapSize;
 
   // In the unlikely event of alignments larger than a page, adjust the amount
   // of memory we want to commit, and trim the extra memory.
-  if (AlignmentHint >= PageSize) {
+  if (UNLIKELY(AlignmentHint >= PageSize)) {
     // For alignments greater than or equal to a page, the user pointer (eg: the
     // pointer that is returned by the C or C++ allocation APIs) ends up on a
     // page boundary , and our headers will live in the preceding page.
@@ -73,13 +73,11 @@
   H->Data = Data;
   {
     ScopedLock L(Mutex);
-    if (!Tail) {
-      Tail = H;
-    } else {
+    if (LIKELY(Tail)) {
       Tail->Next = H;
       H->Prev = Tail;
-      Tail = H;
     }
+    Tail = H;
     AllocatedBytes += CommitSize;
     if (LargestSize < CommitSize)
       LargestSize = CommitSize;
@@ -106,7 +104,7 @@
     CHECK_EQ(Next->Prev, H);
     Next->Prev = Prev;
   }
-  if (Tail == H) {
+  if (UNLIKELY(Tail == H)) {
     CHECK(!Next);
     Tail = Prev;
   } else {
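Annotation (not part of the patch): the secondary.cc list hunk hoists the tail assignment out of the conditional; linking the new header after the current tail only happens when a tail exists, and both branches previously ended with Tail = H. A sketch of the equivalent append-to-tail logic (local types, not the scudo ones):

struct Header { Header *Prev = nullptr, *Next = nullptr; };

// H is the newly mapped header, Tail the current list tail (possibly null).
void appendToList(Header *&Tail, Header *H) {
  if (Tail) { // LIKELY in the patch: the list is rarely empty
    Tail->Next = H;
    H->Prev = Tail;
  }
  Tail = H;
}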
Index: compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
+++ compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
@@ -138,10 +138,10 @@
 // TODO(kostyak): further tune class maps for Android & Fuchsia.
 #if SCUDO_WORDSIZE == 64U
 typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 5, 8, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
 #else
 typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 4, 7, 16, 14, 12> AndroidSizeClassMap;
+typedef SizeClassMap<3, 4, 7, 17, 14, 14> AndroidSizeClassMap;
 #endif
 
 } // namespace scudo
Index: compiler-rt/trunk/lib/scudo/standalone/string_utils.cc
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/string_utils.cc
+++ compiler-rt/trunk/lib/scudo/standalone/string_utils.cc
@@ -9,7 +9,6 @@
 #include "string_utils.h"
 #include "common.h"
 
-#include
 #include
 #include
@@ -44,7 +43,7 @@
   do {
     RAW_CHECK_MSG(static_cast<uptr>(Pos) < MaxLen,
                   "appendNumber buffer overflow");
-    NumBuffer[Pos++] = AbsoluteValue % Base;
+    NumBuffer[Pos++] = static_cast<uptr>(AbsoluteValue % Base);
     AbsoluteValue /= Base;
   } while (AbsoluteValue > 0);
   if (Pos < MinNumberLength) {
@@ -117,7 +116,7 @@
 int formatString(char *Buffer, uptr BufferLength, const char *Format,
                  va_list Args) {
-  UNUSED static const char *PrintfFormatsHelp =
+  static const char *PrintfFormatsHelp =
       "Supported formatString formats: %([0-9]*)?(z|ll)?{d,u,x,X}; %p; "
       "%[-]([0-9]*)?(\\.\\*)?s; %c\n";
   RAW_CHECK(Format);
Index: compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_exclusive.h
@@ -61,7 +61,7 @@
 private:
   void initOnceMaybe(Allocator *Instance) {
     ScopedLock L(Mutex);
-    if (Initialized)
+    if (LIKELY(Initialized))
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
   }
@@ -71,7 +71,7 @@
   // used instead.
   NOINLINE void initThread(Allocator *Instance, bool MinimalInit) {
     initOnceMaybe(Instance);
-    if (MinimalInit)
+    if (UNLIKELY(MinimalInit))
       return;
     CHECK_EQ(
        pthread_setspecific(PThreadKey, reinterpret_cast<void *>(Instance)), 0);
Index: compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
+++ compiler-rt/trunk/lib/scudo/standalone/tsd_shared.h
@@ -95,7 +95,7 @@
   void initOnceMaybe(Allocator *Instance) {
     ScopedLock L(Mutex);
-    if (Initialized)
+    if (LIKELY(Initialized))
       return;
     initLinkerInitialized(Instance); // Sets Initialized.
   }
@@ -112,8 +112,7 @@
     // Use the Precedence of the current TSD as our random seed. Since we are
     // in the slow path, it means that tryLock failed, and as a result it's
    // very likely that said Precedence is non-zero.
-    u32 RandState = static_cast<u32>(CurrentTSD->getPrecedence());
-    const u32 R = getRandomU32(&RandState);
+    const u32 R = static_cast<u32>(CurrentTSD->getPrecedence());
     const u32 Inc = CoPrimes[R % NumberOfCoPrimes];
     u32 Index = R % NumberOfTSDs;
     uptr LowestPrecedence = UINTPTR_MAX;
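Annotation (not part of the patch): the tsd_shared.h hunk drops the extra getRandomU32() round-trip; on the contended slow path the TSD's Precedence value serves as a good-enough seed, so it is used directly to pick the starting index and the co-prime stride. A rough sketch of the selection math with hypothetical names:

#include <cstdint>

using u32 = uint32_t;

// Hypothetical stand-ins for the scudo members; only the index math is shown.
struct TSDSelection {
  u32 StartIndex;
  u32 Increment;
};

TSDSelection selectTSD(u32 Precedence, const u32 *CoPrimes,
                       u32 NumberOfCoPrimes, u32 NumberOfTSDs) {
  const u32 R = Precedence; // previously seeded getRandomU32() instead
  return {R % NumberOfTSDs, CoPrimes[R % NumberOfCoPrimes]};
}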