diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -41,7 +41,7 @@
 
 namespace scudo {
 
-enum class Option { ReleaseInterval };
+enum class Option { ReleaseInterval, MemtagTuning };
 
 template <class Params> class Allocator {
 
@@ -154,6 +154,7 @@
     Options.DeallocTypeMismatch = getFlags()->dealloc_type_mismatch;
     Options.DeleteSizeMismatch = getFlags()->delete_size_mismatch;
     Options.TrackAllocationStacks = false;
+    Options.UseOddEvenTags = true;
     Options.QuarantineMaxChunkSize =
         static_cast<u32>(getFlags()->quarantine_max_chunk_size);
 
@@ -247,6 +248,19 @@
 #endif
   }
 
+  uptr computeOddEvenMaskForPointerMaybe(uptr Ptr, uptr Size) {
+    if (!Options.UseOddEvenTags)
+      return 0;
+
+    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
+    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
+    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
+    // that bit will have the pattern 010101... for consecutive blocks, which we
+    // can use to determine which tag mask to use.
+    return (Ptr & (1ULL << getLeastSignificantSetBitIndex(Size))) ? 0xaaaa
+                                                                  : 0x5555;
+  }
+
   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
@@ -346,7 +360,8 @@
       if (UNLIKELY(useMemoryTagging())) {
         uptr PrevUserPtr;
         Chunk::UnpackedHeader Header;
-        const uptr BlockEnd = BlockUptr + PrimaryT::getSizeByClassId(ClassId);
+        const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
+        const uptr BlockEnd = BlockUptr + BlockSize;
         // If possible, try to reuse the UAF tag that was set by deallocate().
         // For simplicity, only reuse tags if we have the same start address as
         // the previous allocation. This handles the majority of cases since
@@ -396,7 +411,9 @@
             memset(TaggedPtr, 0, archMemoryTagGranuleSize());
         }
       } else {
-        TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd);
+        const uptr OddEvenMask =
+            computeOddEvenMaskForPointerMaybe(BlockUptr, BlockSize);
+        TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
       }
       storeAllocationStackMaybe(Ptr);
     } else if (UNLIKELY(FillContents != NoFill)) {
@@ -676,6 +693,23 @@
       Secondary.setReleaseToOsIntervalMs(static_cast<s32>(Value));
       return true;
     }
+    if (O == Option::MemtagTuning) {
+      // Enabling odd/even tags involves a tradeoff between use-after-free
+      // detection and buffer overflow detection. Odd/even tags make it more
+      // likely for buffer overflows to be detected by increasing the size of
+      // the guaranteed "red zone" around the allocation, but on the other hand
+      // use-after-free is less likely to be detected because the tag space for
+      // any particular chunk is cut in half. Therefore we use this tuning
+      // setting to control whether odd/even tags are enabled.
+      if (Value == M_MEMTAG_TUNING_BUFFER_OVERFLOW) {
+        Options.UseOddEvenTags = true;
+        return true;
+      }
+      if (Value == M_MEMTAG_TUNING_UAF) {
+        Options.UseOddEvenTags = false;
+        return true;
+      }
+    }
     return false;
   }
 
@@ -917,6 +951,7 @@
     u8 DeallocTypeMismatch : 1; // dealloc_type_mismatch
     u8 DeleteSizeMismatch : 1;  // delete_size_mismatch
     u8 TrackAllocationStacks : 1;
+    u8 UseOddEvenTags : 1;
     u32 QuarantineMaxChunkSize; // quarantine_max_chunk_size
   } Options;
 
@@ -987,9 +1022,13 @@
     if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
       u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
       uptr TaggedBegin, TaggedEnd;
+      const uptr OddEvenMask = computeOddEvenMaskForPointerMaybe(
+          reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+          SizeClassMap::getSizeByClassId(NewHeader.ClassId));
       // Exclude the previous tag so that immediate use after free is detected
       // 100% of the time.
-      setRandomTag(Ptr, Size, 1UL << PrevTag, &TaggedBegin, &TaggedEnd);
+      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+                   &TaggedEnd);
       storeDeallocationStackMaybe(Ptr, PrevTag);
     }
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
diff --git a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
--- a/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
+++ b/compiler-rt/lib/scudo/standalone/include/scudo/interface.h
@@ -10,6 +10,7 @@
 #define SCUDO_INTERFACE_H_
 
 #include <stddef.h>
+#include <stdlib.h>
 
 extern "C" {
 
@@ -105,6 +106,29 @@
 const char *__scudo_get_region_info_addr();
 size_t __scudo_get_region_info_size();
 
+#ifndef M_DECAY_TIME
+#define M_DECAY_TIME -100
+#endif
+
+#ifndef M_PURGE
+#define M_PURGE -101
+#endif
+
+// Tune the allocator's choice of memory tags to make it more likely that
+// a certain class of memory errors will be detected. The value argument should
+// be one of the enumerators of the scudo_memtag_tuning enum below.
+#ifndef M_MEMTAG_TUNING
+#define M_MEMTAG_TUNING -102
+#endif
+
+enum scudo_memtag_tuning {
+  // Tune for buffer overflows.
+  M_MEMTAG_TUNING_BUFFER_OVERFLOW,
+
+  // Tune for use-after-free.
+  M_MEMTAG_TUNING_UAF,
+};
+
 } // extern "C"
 
 #endif // SCUDO_INTERFACE_H_
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -126,7 +126,8 @@
                : "memory");
 }
 
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+                                uptr BlockEnd) {
   // Prepare the granule before the chunk to store the chunk header by setting
   // its tag to 0. Normally its tag will already be 0, but in the case where a
   // chunk holding a low alignment allocation is reused for a higher alignment
@@ -138,7 +139,7 @@
                : "memory");
 
   uptr TaggedBegin, TaggedEnd;
-  setRandomTag(Ptr, Size, 0, &TaggedBegin, &TaggedEnd);
+  setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
 
   // Finally, set the tag of the granule past the end of the allocation to 0,
   // to catch linear overflows even if a previous larger allocation used the
@@ -235,9 +236,11 @@
   UNREACHABLE("memory tagging not supported");
 }
 
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+                                uptr BlockEnd) {
   (void)Ptr;
   (void)Size;
+  (void)ExcludeMask;
   (void)BlockEnd;
   UNREACHABLE("memory tagging not supported");
 }
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -491,3 +491,50 @@
   }
   EXPECT_EQ(FailedAllocationsCount, 0U);
 }
+
+TEST(ScudoCombinedTest, OddEven) {
+  using AllocatorT = scudo::Allocator<scudo::AndroidConfig>;
+  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  Allocator->releaseToOS();
+
+  if (!Allocator->useMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    return;
+
+  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
+    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
+    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
+    EXPECT_NE(Tag1 % 2, Tag2 % 2);
+  };
+
+  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
+       ClassId++) {
+    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);
+
+    std::set<scudo::uptr> Ptrs;
+    bool Found = false;
+    for (unsigned I = 0; I != 65536; ++I) {
+      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
+          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
+      if (Ptrs.count(P - Size)) {
+        Found = true;
+        CheckOddEven(P, P - Size);
+        break;
+      }
+      if (Ptrs.count(P + Size)) {
+        Found = true;
+        CheckOddEven(P, P + Size);
+        break;
+      }
+      Ptrs.insert(P);
+    }
+    EXPECT_TRUE(Found);
+  }
+}
diff --git a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -6,6 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 
+#include "scudo/interface.h"
 #include "tests/scudo_unit_test.h"
 
 #include <limits.h>
@@ -188,14 +189,6 @@
   }
 }
 
-#ifndef M_DECAY_TIME
-#define M_DECAY_TIME -100
-#endif
-
-#ifndef M_PURGE
-#define M_PURGE -101
-#endif
-
 #if !SCUDO_FUCHSIA
 TEST(ScudoWrappersCTest, MallOpt) {
   errno = 0;
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.h b/compiler-rt/lib/scudo/standalone/wrappers_c.h
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.h
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.h
@@ -41,12 +41,4 @@
 #define SCUDO_MALLINFO __scudo_mallinfo
 #endif
 
-#ifndef M_DECAY_TIME
-#define M_DECAY_TIME -100
-#endif
-
-#ifndef M_PURGE
-#define M_PURGE -101
-#endif
-
 #endif // SCUDO_WRAPPERS_C_H_
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -173,6 +173,9 @@
   } else if (param == M_PURGE) {
     SCUDO_ALLOCATOR.releaseToOS();
     return 1;
+  } else if (param == M_MEMTAG_TUNING) {
+    return SCUDO_ALLOCATOR.setOption(scudo::Option::MemtagTuning,
+                                     static_cast<scudo::sptr>(value));
   }
   return 0;
 }