diff --git a/compiler-rt/lib/scudo/standalone/atomic_helpers.h b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
--- a/compiler-rt/lib/scudo/standalone/atomic_helpers.h
+++ b/compiler-rt/lib/scudo/standalone/atomic_helpers.h
@@ -89,6 +89,20 @@
   return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
 }
 
+template <typename T>
+inline typename T::Type atomic_fetch_and(volatile T *A, typename T::Type V,
+                                         memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_and(&A->ValDoNotUse, V, MO);
+}
+
+template <typename T>
+inline typename T::Type atomic_fetch_or(volatile T *A, typename T::Type V,
+                                        memory_order MO) {
+  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
+  return __atomic_fetch_or(&A->ValDoNotUse, V, MO);
+}
+
 template <typename T>
 inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                         memory_order MO) {
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -15,6 +15,7 @@
 #include "flags_parser.h"
 #include "local_cache.h"
 #include "memtag.h"
+#include "options.h"
 #include "quarantine.h"
 #include "report.h"
 #include "secondary.h"
@@ -144,16 +145,19 @@
     reportUnrecognizedFlags();
 
     // Store some flags locally.
-    Options.MayReturnNull = getFlags()->may_return_null;
-    Options.FillContents =
-        getFlags()->zero_contents
-            ? ZeroFill
-            : (getFlags()->pattern_fill_contents ? PatternOrZeroFill : NoFill);
-    Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
-    Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
-    Options.TrackAllocationStacks = false;
-    Options.UseOddEvenTags = true;
-    Options.QuarantineMaxChunkSize =
+    if (getFlags()->may_return_null)
+      Primary.Options.set(OptionBit::MayReturnNull);
+    if (getFlags()->zero_contents)
+      Primary.Options.setFillContentsMode(ZeroFill);
+    else if (getFlags()->pattern_fill_contents)
+      Primary.Options.setFillContentsMode(PatternOrZeroFill);
+    if (getFlags()->dealloc_type_mismatch)
+      Primary.Options.set(OptionBit::DeallocTypeMismatch);
+    if (getFlags()->delete_size_mismatch)
+      Primary.Options.set(OptionBit::DeleteSizeMismatch);
+    Primary.Options.set(OptionBit::UseOddEvenTags);
+
+    QuarantineMaxChunkSize =
         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
     Stats.initLinkerInitialized();
@@ -250,8 +254,8 @@
 #endif
   }
 
-  uptr computeOddEvenMaskForPointerMaybe(uptr Ptr, uptr Size) {
-    if (!Options.UseOddEvenTags)
+  uptr computeOddEvenMaskForPointerMaybe(Options Options, uptr Ptr, uptr Size) {
+    if (!Options.get(OptionBit::UseOddEvenTags))
       return 0;
 
     // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
@@ -267,6 +271,7 @@
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
     initThreadMaybe();
+    Options Options = Primary.Options.load();
 
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.shouldSample())) {
@@ -278,10 +283,10 @@
     const FillContentsMode FillContents =
         ZeroContents ? ZeroFill
                      : TSDRegistry.getDisableMemInit() ? NoFill
-                                                       : Options.FillContents;
+                                                       : Options.getFillContentsMode();
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAlignmentTooBig(Alignment, MaxAlignment);
     }
@@ -300,7 +305,7 @@
     // Takes care of extravagantly large sizes as well as integer overflows.
     static_assert(MaxAllowedMallocSize < UINTPTR_MAX - MaxAlignment, "");
     if (UNLIKELY(Size >= MaxAllowedMallocSize)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAllocationSizeTooBig(Size, NeededSize, MaxAllowedMallocSize);
     }
@@ -336,7 +341,7 @@
                                  FillContents);
 
     if (UNLIKELY(!Block)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportOutOfMemory(NeededSize);
     }
@@ -359,7 +364,7 @@
     //
     // When memory tagging is enabled, zeroing the contents is done as part of
     // setting the tag.
-    if (UNLIKELY(useMemoryTagging())) {
+    if (UNLIKELY(useMemoryTagging(Options))) {
       uptr PrevUserPtr;
       Chunk::UnpackedHeader Header;
       const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
@@ -424,10 +429,10 @@
         }
       } else {
         const uptr OddEvenMask =
-            computeOddEvenMaskForPointerMaybe(BlockUptr, BlockSize);
+            computeOddEvenMaskForPointerMaybe(Options, BlockUptr, BlockSize);
         TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
       }
-      storeAllocationStackMaybe(Ptr);
+      storeAllocationStackMaybe(Options, Ptr);
     } else if (UNLIKELY(FillContents != NoFill)) {
       // This condition is not necessarily unlikely, but since memset is
       // costly, we might as well mark it as such.
@@ -471,6 +476,7 @@
     // the TLS destructors, ending up in initialized thread specific data never
     // being destroyed properly. Any other heap operation will do a full init.
     initThreadMaybe(/*MinimalInit=*/true);
+    Options Options = Primary.Options.load();
 
 #ifdef GWP_ASAN_HOOKS
     if (UNLIKELY(GuardedAlloc.pointerIsMine(Ptr))) {
@@ -494,7 +500,7 @@
     if (UNLIKELY(Header.State != Chunk::State::Allocated))
       reportInvalidChunkState(AllocatorAction::Deallocating, Ptr);
 
-    if (Options.DeallocTypeMismatch) {
+    if (Options.get(OptionBit::DeallocTypeMismatch)) {
       if (Header.OriginOrWasZeroed != Origin) {
         // With the exception of memalign'd chunks, that can be still be free'd.
         if (UNLIKELY(Header.OriginOrWasZeroed != Chunk::Origin::Memalign ||
@@ -505,19 +511,20 @@
     }
 
     const uptr Size = getSize(Ptr, &Header);
-    if (DeleteSize && Options.DeleteSizeMismatch) {
+    if (DeleteSize && Options.get(OptionBit::DeleteSizeMismatch)) {
      if (UNLIKELY(DeleteSize != Size))
        reportDeleteSizeMismatch(Ptr, DeleteSize, Size);
     }
 
-    quarantineOrDeallocateChunk(Ptr, &Header, Size);
+    quarantineOrDeallocateChunk(Options, Ptr, &Header, Size);
   }
 
   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
     initThreadMaybe();
+    Options Options = Primary.Options.load();
 
     if (UNLIKELY(NewSize >= MaxAllowedMallocSize)) {
-      if (Options.MayReturnNull)
+      if (Options.get(OptionBit::MayReturnNull))
         return nullptr;
       reportAllocationSizeTooBig(NewSize, 0, MaxAllowedMallocSize);
     }
@@ -552,7 +559,7 @@
     // Pointer has to be allocated with a malloc-type function. Some
     // applications think that it is OK to realloc a memalign'ed pointer, which
     // will trigger this check. It really isn't.
-    if (Options.DeallocTypeMismatch) {
+    if (Options.get(OptionBit::DeallocTypeMismatch)) {
       if (UNLIKELY(OldHeader.OriginOrWasZeroed != Chunk::Origin::Malloc))
         reportDeallocTypeMismatch(AllocatorAction::Reallocating, OldPtr,
                                   OldHeader.OriginOrWasZeroed,
@@ -583,11 +590,11 @@
                    : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
          Chunk::SizeOrUnusedBytesMask;
       Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
-      if (UNLIKELY(ClassId && useMemoryTagging())) {
+      if (UNLIKELY(ClassId && useMemoryTagging(Options))) {
         resizeTaggedChunk(reinterpret_cast<uptr>(OldTaggedPtr) + OldSize,
                           reinterpret_cast<uptr>(OldTaggedPtr) + NewSize,
                           BlockEnd);
-        storeAllocationStackMaybe(OldPtr);
+        storeAllocationStackMaybe(Options, OldPtr);
       }
       return OldTaggedPtr;
     }
@@ -601,7 +608,7 @@
     if (NewPtr) {
       const uptr OldSize = getSize(OldPtr, &OldHeader);
       memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
-      quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
+      quarantineOrDeallocateChunk(Options, OldPtr, &OldHeader, OldSize);
     }
     return NewPtr;
   }
@@ -682,7 +689,7 @@
       if (getChunkFromBlock(Block, &Chunk, &Header) &&
           Header.State == Chunk::State::Allocated) {
         uptr TaggedChunk = Chunk;
-        if (useMemoryTagging())
+        if (useMemoryTagging(Primary.Options.load()))
           TaggedChunk = loadTag(Chunk);
         Callback(TaggedChunk, getSize(reinterpret_cast<void *>(Chunk), &Header),
                  Arg);
@@ -697,7 +704,7 @@
 
   bool canReturnNull() {
     initThreadMaybe();
-    return Options.MayReturnNull;
+    return Primary.Options.load().get(OptionBit::MayReturnNull);
   }
 
   bool setOption(Option O, sptr Value) {
@@ -711,9 +718,9 @@
       // any particular chunk is cut in half. Therefore we use this tuning
       // setting to control whether odd/even tags are enabled.
       if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW)
-        Options.UseOddEvenTags = true;
+        Primary.Options.set(OptionBit::UseOddEvenTags);
       else if (Value == M_MEMTAG_TUNING_UAF)
-        Options.UseOddEvenTags = false;
+        Primary.Options.clear(OptionBit::UseOddEvenTags);
       return true;
     } else {
       // We leave it to the various sub-components to decide whether or not they
@@ -773,18 +780,26 @@
            Header.State == Chunk::State::Allocated;
   }
 
-  bool useMemoryTagging() { return Primary.useMemoryTagging(); }
+  bool useMemoryTagging() const {
+    return useMemoryTagging(Primary.Options.load());
+  }
+  static bool useMemoryTagging(Options Options) {
+    return PrimaryT::useMemoryTagging(Options);
+  }
 
   void disableMemoryTagging() { Primary.disableMemoryTagging(); }
 
   void setTrackAllocationStacks(bool Track) {
     initThreadMaybe();
-    Options.TrackAllocationStacks = Track;
+    if (Track)
+      Primary.Options.set(OptionBit::TrackAllocationStacks);
+    else
+      Primary.Options.clear(OptionBit::TrackAllocationStacks);
   }
 
   void setFillContents(FillContentsMode FillContents) {
     initThreadMaybe();
-    Options.FillContents = FillContents;
+    Primary.Options.setFillContentsMode(FillContents);
   }
 
   const char *getStackDepotAddress() const {
@@ -951,16 +966,7 @@
   static const uptr MaxTraceSize = 64;
 
   u32 Cookie;
-
-  struct {
-    u8 MayReturnNull : 1;              // may_return_null
-    FillContentsMode FillContents : 2; // zero_contents, pattern_fill_contents
-    u8 DeallocTypeMismatch : 1;        // dealloc_type_mismatch
-    u8 DeleteSizeMismatch : 1;         // delete_size_mismatch
-    u8 TrackAllocationStacks : 1;
-    u8 UseOddEvenTags : 1;
-    u32 QuarantineMaxChunkSize;        // quarantine_max_chunk_size
-  } Options;
+  u32 QuarantineMaxChunkSize;
 
   GlobalStats Stats;
   PrimaryT Primary;
@@ -1025,15 +1031,15 @@
            reinterpret_cast<uptr>(Ptr) - SizeOrUnusedBytes;
   }
 
-  void quarantineOrDeallocateChunk(void *Ptr, Chunk::UnpackedHeader *Header,
-                                   uptr Size) {
+  void quarantineOrDeallocateChunk(Options Options, void *Ptr,
+                                   Chunk::UnpackedHeader *Header, uptr Size) {
     Chunk::UnpackedHeader NewHeader = *Header;
-    if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
+    if (UNLIKELY(NewHeader.ClassId && useMemoryTagging(Options))) {
       u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
       if (!TSDRegistry.getDisableMemInit()) {
         uptr TaggedBegin, TaggedEnd;
         const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
-            reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+            Options, reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
             SizeClassMap::getSizeByClassId(NewHeader.ClassId));
         // Exclude the previous tag so that immediate use after free is detected
         // 100% of the time.
@@ -1041,14 +1047,14 @@
                      &TaggedEnd);
       }
       NewHeader.OriginOrWasZeroed = !TSDRegistry.getDisableMemInit();
-      storeDeallocationStackMaybe(Ptr, PrevTag);
+      storeDeallocationStackMaybe(Options, Ptr, PrevTag);
     }
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
     // than the maximum allowed, we return a chunk directly to the backend.
     // Logical Or can be short-circuited, which introduces unnecessary
     // conditional jumps, so use bitwise Or and let the compiler be clever.
-    const bool BypassQuarantine = !Quarantine.getCacheSize() | !Size |
-                                  (Size > Options.QuarantineMaxChunkSize);
+    const bool BypassQuarantine =
+        !Quarantine.getCacheSize() | !Size | (Size > QuarantineMaxChunkSize);
     if (BypassQuarantine) {
       NewHeader.State = Chunk::State::Available;
       Chunk::compareExchangeHeader(Cookie, Ptr, &NewHeader, Header);
@@ -1089,16 +1095,17 @@
     return Offset + Chunk::getHeaderSize();
   }
 
-  void storeAllocationStackMaybe(void *Ptr) {
-    if (!UNLIKELY(Options.TrackAllocationStacks))
+  void storeAllocationStackMaybe(Options Options, void *Ptr) {
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
     auto *Ptr32 = reinterpret_cast<u32 *>(Ptr);
     Ptr32[MemTagAllocationTraceIndex] = collectStackTrace();
     Ptr32[MemTagAllocationTidIndex] = getThreadID();
   }
 
-  void storeDeallocationStackMaybe(void *Ptr, uint8_t PrevTag) {
-    if (!UNLIKELY(Options.TrackAllocationStacks))
+  void storeDeallocationStackMaybe(Options Options, void *Ptr,
+                                   uint8_t PrevTag) {
+    if (!UNLIKELY(Options.get(OptionBit::TrackAllocationStacks)))
       return;
 
     // Disable tag checks here so that we don't need to worry about zero sized
diff --git a/compiler-rt/lib/scudo/standalone/options.h b/compiler-rt/lib/scudo/standalone/options.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/options.h
@@ -0,0 +1,72 @@
+//===-- options.h -----------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_OPTIONS_H_
+#define SCUDO_OPTIONS_H_
+
+#include "atomic_helpers.h"
+#include "common.h"
+
+namespace scudo {
+
+enum class OptionBit {
+  MayReturnNull,
+  FillContents0of2,
+  FillContents1of2,
+  DeallocTypeMismatch,
+  DeleteSizeMismatch,
+  TrackAllocationStacks,
+  UseOddEvenTags,
+  UseMemoryTagging,
+};
+
+struct Options {
+  u32 Val;
+
+  bool get(OptionBit Opt) const { return Val & (1U << static_cast<u32>(Opt)); }
+
+  FillContentsMode getFillContentsMode() const {
+    return static_cast<FillContentsMode>(
+        (Val >> static_cast<u32>(OptionBit::FillContents0of2)) & 3);
+  }
+};
+
+struct AtomicOptions {
+  atomic_u32 Val;
+
+public:
+  Options load() const {
+    return Options{atomic_load(&Val, memory_order_relaxed)};
+  }
+
+  void clear(OptionBit Opt) {
+    atomic_fetch_and(&Val, ~(1U << static_cast<u32>(Opt)),
+                     memory_order_relaxed);
+  }
+
+  void set(OptionBit Opt) {
+    atomic_fetch_or(&Val, 1U << static_cast<u32>(Opt), memory_order_relaxed);
+  }
+
+  void setFillContentsMode(FillContentsMode FillContents) {
+    while (1) {
+      u32 Opts = atomic_load(&Val, memory_order_relaxed);
+      u32 NewOpts = Opts;
+      NewOpts &= ~(3U << static_cast<u32>(OptionBit::FillContents0of2));
+      NewOpts |= static_cast<u32>(FillContents)
+                 << static_cast<u32>(OptionBit::FillContents0of2);
+      if (atomic_compare_exchange_strong(&Val, &Opts, NewOpts,
+                                         memory_order_relaxed))
+        break;
+    }
+  }
+};
+
+} // namespace scudo
+
+#endif // SCUDO_OPTIONS_H_
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -13,6 +13,7 @@
 #include "common.h"
 #include "list.h"
 #include "local_cache.h"
+#include "options.h"
 #include "release.h"
 #include "report.h"
 #include "stats.h"
@@ -206,7 +207,10 @@
     return TotalReleasedBytes;
   }
 
-  bool useMemoryTagging() { return false; }
+  static bool useMemoryTagging(Options Options) {
+    (void)Options;
+    return false;
+  }
   void disableMemoryTagging() {}
 
   const char *getRegionInfoArrayAddress() const { return nullptr; }
@@ -218,6 +222,8 @@
     return {};
   }
 
+  AtomicOptions Options;
+
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr RegionSize = 1UL << RegionSizeLog;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -14,6 +14,7 @@
 #include "list.h"
 #include "local_cache.h"
 #include "memtag.h"
+#include "options.h"
 #include "release.h"
 #include "stats.h"
 #include "string_utils.h"
@@ -93,8 +94,8 @@
     }
     setOption(Option::ReleaseInterval, static_cast<sptr>(ReleaseToOsInterval));
 
-    if (SupportsMemoryTagging)
-      UseMemoryTagging = systemSupportsMemoryTagging();
+    if (SupportsMemoryTagging && systemSupportsMemoryTagging())
+      Options.set(OptionBit::UseMemoryTagging);
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -207,10 +208,10 @@
     return TotalReleasedBytes;
   }
 
-  bool useMemoryTagging() const {
-    return SupportsMemoryTagging && UseMemoryTagging;
+  static bool useMemoryTagging(Options Options) {
+    return SupportsMemoryTagging && Options.get(OptionBit::UseMemoryTagging);
   }
-  void disableMemoryTagging() { UseMemoryTagging = false; }
+  void disableMemoryTagging() { Options.clear(OptionBit::UseMemoryTagging); }
 
   const char *getRegionInfoArrayAddress() const {
     return reinterpret_cast<const char *>(RegionInfoArray);
@@ -262,6 +263,8 @@
     return B;
   }
 
+  AtomicOptions Options;
+
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
@@ -306,7 +309,6 @@
   uptr PrimaryBase;
   MapPlatformData Data;
   atomic_s32 ReleaseToOsIntervalMs;
-  bool UseMemoryTagging;
   alignas(SCUDO_CACHE_LINE_SIZE) RegionInfo RegionInfoArray[NumClasses];
 
   RegionInfo *getRegionInfo(uptr ClassId) {
@@ -373,7 +375,7 @@
     if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                       UserMapSize, "scudo:primary",
                       MAP_ALLOWNOMEM | MAP_RESIZABLE |
-                          (useMemoryTagging() ? MAP_MEMTAG : 0),
+                          (useMemoryTagging(Options.load()) ? MAP_MEMTAG : 0),
                       &Region->Data)))
       return nullptr;
     Region->MappedUser += UserMapSize;
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -234,30 +234,26 @@
 
 // Disable memory tagging for the heap. The caller must disable memory tag
 // checks globally (e.g. by clearing TCF0 on aarch64) before calling this
-// function, and may not re-enable them after calling the function. The program
-// must be single threaded at the point when the function is called.
+// function, and may not re-enable them after calling the function.
 INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
   SCUDO_ALLOCATOR.disableMemoryTagging();
 }
 
 // Sets whether scudo records stack traces and other metadata for allocations
 // and deallocations. This function only has an effect if the allocator and
-// hardware support memory tagging. The program must be single threaded at the
-// point when the function is called.
+// hardware support memory tagging.
 INTERFACE WEAK void
 SCUDO_PREFIX(malloc_set_track_allocation_stacks)(int track) {
   SCUDO_ALLOCATOR.setTrackAllocationStacks(track);
 }
 
-// Sets whether scudo zero-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo zero-initializes all allocated memory.
 INTERFACE WEAK void SCUDO_PREFIX(malloc_set_zero_contents)(int zero_contents) {
   SCUDO_ALLOCATOR.setFillContents(zero_contents ? scudo::ZeroFill
                                                 : scudo::NoFill);
 }
 
-// Sets whether scudo pattern-initializes all allocated memory. The program must
-// be single threaded at the point when the function is called.
+// Sets whether scudo pattern-initializes all allocated memory.
 INTERFACE WEAK void
 SCUDO_PREFIX(malloc_set_pattern_fill_contents)(int pattern_fill_contents) {
   SCUDO_ALLOCATOR.setFillContents(
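
Reviewer note (commentary, not part of the patch): the sketch below is a minimal standalone mirror of the Options/AtomicOptions layout that options.h introduces above, using std::atomic<uint32_t> in place of scudo's atomic_u32 and the new atomic_fetch_and/atomic_fetch_or helpers. It shows why the single-bit flags need only one atomic read-modify-write each, while the two-bit fill-contents field goes through a compare-and-swap loop: a concurrent set() or clear() of an unrelated bit makes the exchange fail and retry rather than being overwritten, which is what allows the "must be single threaded" caveats to be dropped from the wrappers_c.inc comments.

#include <atomic>
#include <cassert>
#include <cstdint>

// Bit assignments copied from the new options.h.
enum class OptionBit : uint32_t {
  MayReturnNull,
  FillContents0of2,
  FillContents1of2,
  DeallocTypeMismatch,
  DeleteSizeMismatch,
  TrackAllocationStacks,
  UseOddEvenTags,
  UseMemoryTagging,
};
enum FillContentsMode : uint32_t { NoFill = 0, ZeroFill = 1, PatternOrZeroFill = 2 };

struct AtomicOptions {
  std::atomic<uint32_t> Val{0};

  // Single-bit flags: one atomic RMW each, no lock required.
  void set(OptionBit O) {
    Val.fetch_or(1U << static_cast<uint32_t>(O), std::memory_order_relaxed);
  }
  void clear(OptionBit O) {
    Val.fetch_and(~(1U << static_cast<uint32_t>(O)), std::memory_order_relaxed);
  }

  // Two-bit field: CAS loop so that a racing update of an unrelated bit only
  // forces a retry instead of being clobbered by a plain store.
  void setFillContentsMode(FillContentsMode M) {
    uint32_t Opts = Val.load(std::memory_order_relaxed);
    uint32_t New;
    do {
      New = Opts & ~(3U << static_cast<uint32_t>(OptionBit::FillContents0of2));
      New |= static_cast<uint32_t>(M)
             << static_cast<uint32_t>(OptionBit::FillContents0of2);
    } while (!Val.compare_exchange_weak(Opts, New, std::memory_order_relaxed));
  }
};

int main() {
  AtomicOptions O;
  O.set(OptionBit::UseOddEvenTags);
  O.setFillContentsMode(PatternOrZeroFill);
  const uint32_t V = O.Val.load(std::memory_order_relaxed);
  // The previously set flag survives, and the fill mode occupies its two bits.
  assert(V & (1U << static_cast<uint32_t>(OptionBit::UseOddEvenTags)));
  assert(((V >> static_cast<uint32_t>(OptionBit::FillContents0of2)) & 3) ==
         PatternOrZeroFill);
  return 0;
}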