diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -247,6 +247,16 @@
 #endif
   }
 
+  uptr computeOddEvenMaskForPointer(uptr Ptr, uptr Size) {
+    // If a chunk's tag is odd, we want the tags of the surrounding blocks to be
+    // even, and vice versa. Blocks are laid out Size bytes apart, and adding
+    // Size to Ptr will flip the least significant set bit of Size in Ptr, so
+    // that bit will have the pattern 010101... for consecutive blocks, which we
+    // can use to determine which tag mask to use.
+    return (Ptr & (1ULL << getLeastSignificantSetBitIndex(Size))) ? 0xaaaa
+                                                                  : 0x5555;
+  }
+
   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
@@ -346,7 +356,8 @@
       if (UNLIKELY(useMemoryTagging())) {
         uptr PrevUserPtr;
         Chunk::UnpackedHeader Header;
-        const uptr BlockEnd = BlockUptr + PrimaryT::getSizeByClassId(ClassId);
+        const uptr BlockSize = PrimaryT::getSizeByClassId(ClassId);
+        const uptr BlockEnd = BlockUptr + BlockSize;
         // If possible, try to reuse the UAF tag that was set by deallocate().
         // For simplicity, only reuse tags if we have the same start address as
         // the previous allocation. This handles the majority of cases since
@@ -396,7 +407,9 @@
             memset(TaggedPtr, 0, archMemoryTagGranuleSize());
           }
         } else {
-          TaggedPtr = prepareTaggedChunk(Ptr, Size, BlockEnd);
+          const uptr OddEvenMask =
+              computeOddEvenMaskForPointer(BlockUptr, BlockSize);
+          TaggedPtr = prepareTaggedChunk(Ptr, Size, OddEvenMask, BlockEnd);
         }
         storeAllocationStackMaybe(Ptr);
       } else if (UNLIKELY(FillContents != NoFill)) {
@@ -987,9 +1000,13 @@
     if (UNLIKELY(NewHeader.ClassId && useMemoryTagging())) {
       u8 PrevTag = extractTag(loadTag(reinterpret_cast<uptr>(Ptr)));
       uptr TaggedBegin, TaggedEnd;
+      const uptr OddEvenMask = computeOddEvenMaskForPointer(
+          reinterpret_cast<uptr>(getBlockBegin(Ptr, &NewHeader)),
+          SizeClassMap::getSizeByClassId(NewHeader.ClassId));
       // Exclude the previous tag so that immediate use after free is detected
       // 100% of the time.
-      setRandomTag(Ptr, Size, 1UL << PrevTag, &TaggedBegin, &TaggedEnd);
+      setRandomTag(Ptr, Size, OddEvenMask | (1UL << PrevTag), &TaggedBegin,
+                   &TaggedEnd);
       storeDeallocationStackMaybe(Ptr, PrevTag);
     }
     // If the quarantine is disabled, the actual size of a chunk is 0 or larger
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
--- a/compiler-rt/lib/scudo/standalone/memtag.h
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -126,7 +126,8 @@
                        : "memory");
 }
 
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+                                uptr BlockEnd) {
   // Prepare the granule before the chunk to store the chunk header by setting
   // its tag to 0. Normally its tag will already be 0, but in the case where a
   // chunk holding a low alignment allocation is reused for a higher alignment
@@ -138,7 +139,7 @@
                        : "memory");
 
   uptr TaggedBegin, TaggedEnd;
-  setRandomTag(Ptr, Size, 0, &TaggedBegin, &TaggedEnd);
+  setRandomTag(Ptr, Size, ExcludeMask, &TaggedBegin, &TaggedEnd);
 
   // Finally, set the tag of the granule past the end of the allocation to 0,
   // to catch linear overflows even if a previous larger allocation used the
@@ -235,9 +236,11 @@
   UNREACHABLE("memory tagging not supported");
 }
 
-inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr BlockEnd) {
+inline void *prepareTaggedChunk(void *Ptr, uptr Size, uptr ExcludeMask,
+                                uptr BlockEnd) {
   (void)Ptr;
   (void)Size;
+  (void)ExcludeMask;
   (void)BlockEnd;
   UNREACHABLE("memory tagging not supported");
 }
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -491,3 +491,50 @@
   }
   EXPECT_EQ(FailedAllocationsCount, 0U);
 }
+
+TEST(ScudoCombinedTest, OddEven) {
+  using AllocatorT = scudo::Allocator<scudo::AndroidConfig>;
+  using SizeClassMap = AllocatorT::PrimaryT::SizeClassMap;
+  auto Deleter = [](AllocatorT *A) {
+    A->unmapTestOnly();
+    delete A;
+  };
+  std::unique_ptr<AllocatorT, decltype(Deleter)> Allocator(new AllocatorT,
+                                                           Deleter);
+  Allocator->reset();
+  Allocator->releaseToOS();
+
+  if (!Allocator->useMemoryTagging() ||
+      !scudo::systemDetectsMemoryTagFaultsTestOnly())
+    return;
+
+  auto CheckOddEven = [](scudo::uptr P1, scudo::uptr P2) {
+    scudo::uptr Tag1 = scudo::extractTag(scudo::loadTag(P1));
+    scudo::uptr Tag2 = scudo::extractTag(scudo::loadTag(P2));
+    EXPECT_NE(Tag1 % 2, Tag2 % 2);
+  };
+
+  for (scudo::uptr ClassId = 1U; ClassId <= SizeClassMap::LargestClassId;
+       ClassId++) {
+    const scudo::uptr Size = SizeClassMap::getSizeByClassId(ClassId);
+
+    std::set<scudo::uptr> Ptrs;
+    bool Found = false;
+    for (unsigned I = 0; I != 65536; ++I) {
+      scudo::uptr P = scudo::untagPointer(reinterpret_cast<scudo::uptr>(
+          Allocator->allocate(Size - scudo::Chunk::getHeaderSize(), Origin)));
+      if (Ptrs.count(P - Size)) {
+        Found = true;
+        CheckOddEven(P, P - Size);
+        break;
+      }
+      if (Ptrs.count(P + Size)) {
+        Found = true;
+        CheckOddEven(P, P + Size);
+        break;
+      }
+      Ptrs.insert(P);
+    }
+    EXPECT_TRUE(Found);
+  }
+}
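
Note (not part of the patch): the standalone sketch below models the mask selection that computeOddEvenMaskForPointer() performs above, to make the alternation argument concrete. It assumes a Size-byte-spaced block layout like the primary allocator's; RegionBase and BlockSize are made-up values, and leastSignificantSetBitIndex() is a local stand-in for Scudo's getLeastSignificantSetBitIndex().

// Minimal sketch (GCC/Clang builtins), not Scudo code: shows that adjacent
// Size-spaced blocks always receive opposite exclude masks, so their tags
// differ in parity.
#include <cassert>
#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Stand-in for scudo::getLeastSignificantSetBitIndex().
static uptr leastSignificantSetBitIndex(uptr X) {
  return static_cast<uptr>(__builtin_ctzll(X));
}

// Mirrors computeOddEvenMaskForPointer() from the patch: 0xaaaa excludes all
// odd tags (forcing an even tag), 0x5555 excludes all even tags (forcing odd).
static uptr computeOddEvenMask(uptr Ptr, uptr Size) {
  return (Ptr & (1ULL << leastSignificantSetBitIndex(Size))) ? 0xaaaa : 0x5555;
}

int main() {
  const uptr RegionBase = 0x10000000; // hypothetical primary region base
  const uptr BlockSize = 0x60;        // hypothetical 96-byte size class

  uptr PrevMask = 0;
  for (uptr I = 0; I != 8; ++I) {
    const uptr Block = RegionBase + I * BlockSize;
    const uptr Mask = computeOddEvenMask(Block, BlockSize);
    printf("block 0x%llx -> exclude mask 0x%llx (%s tags allowed)\n",
           static_cast<unsigned long long>(Block),
           static_cast<unsigned long long>(Mask),
           Mask == 0xaaaa ? "even" : "odd");
    // Adding BlockSize flips the bit at leastSignificantSetBitIndex(BlockSize),
    // so consecutive blocks must select opposite masks.
    if (I != 0)
      assert(Mask != PrevMask);
    PrevMask = Mask;
  }
  return 0;
}

The exclude masks are chosen so that setRandomTag() can only pick an even tag for one neighbor and an odd tag for the other; together with excluding the previous tag on deallocation, this is intended to make a linear overflow into an adjacent chunk, as well as an immediate use after free, detectable deterministically rather than only with high probability.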