diff --git a/compiler-rt/lib/scudo/standalone/allocator_config.h b/compiler-rt/lib/scudo/standalone/allocator_config.h
--- a/compiler-rt/lib/scudo/standalone/allocator_config.h
+++ b/compiler-rt/lib/scudo/standalone/allocator_config.h
@@ -27,7 +27,7 @@
   using SizeClassMap = DefaultSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 1GB Regions
-  typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+  typedef SizeClassAllocator64<SizeClassMap, 30U, true> Primary;
 #else
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
@@ -40,7 +40,7 @@
   using SizeClassMap = AndroidSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 1GB regions
-  typedef SizeClassAllocator64<SizeClassMap, 30U> Primary;
+  typedef SizeClassAllocator64<SizeClassMap, 30U, true> Primary;
 #else
   // 512KB regions
   typedef SizeClassAllocator32<SizeClassMap, 19U> Primary;
@@ -54,7 +54,7 @@
   using SizeClassMap = SvelteSizeClassMap;
 #if SCUDO_CAN_USE_PRIMARY64
   // 512MB regions
-  typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
+  typedef SizeClassAllocator64<SizeClassMap, 29U, true> Primary;
 #else
   // 64KB regions
   typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
@@ -66,7 +66,7 @@
 
 struct FuchsiaConfig {
   // 1GB Regions
-  typedef SizeClassAllocator64<DefaultSizeClassMap, 30U> Primary;
+  typedef SizeClassAllocator64<DefaultSizeClassMap, 30U, true> Primary;
   typedef MapAllocator<> Secondary;
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 8U>; // Shared, max 8 TSDs.
diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -15,6 +15,7 @@
 #include "flags_parser.h"
 #include "interface.h"
 #include "local_cache.h"
+#include "memtag.h"
 #include "quarantine.h"
 #include "report.h"
 #include "secondary.h"
@@ -160,6 +161,12 @@
     TSD->Cache.destroy(&Stats);
   }
 
+  void *maybeUntagPointer(void *Ptr) {
+    if (Primary.SupportsMemoryTagging)
+      return untagPointer(Ptr);
+    return Ptr;
+  }
+
   NOINLINE void *allocate(uptr Size, Chunk::Origin Origin,
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
@@ -215,12 +222,6 @@
       reportOutOfMemory(NeededSize);
     }
 
-    // We only need to zero the contents for Primary backed allocations. This
-    // condition is not necessarily unlikely, but since memset is costly, we
-    // might as well mark it as such.
-    if (UNLIKELY(ZeroContents && ClassId))
-      memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
-
     Chunk::UnpackedHeader Header = {};
     uptr UserPtr = reinterpret_cast<uptr>(Block) + Chunk::getHeaderSize();
     if (UNLIKELY(!isAligned(UserPtr, Alignment))) {
@@ -236,18 +237,39 @@
       UserPtr = AlignedUserPtr;
       Header.Offset = (Offset >> MinAlignmentLog) & Chunk::OffsetMask;
     }
+
+    void *Ptr = reinterpret_cast<void *>(UserPtr);
+    void *TaggedPtr = Ptr;
+    if (ClassId) {
+      // We only need to zero or tag the contents for Primary backed
+      // allocations. We only set tags for primary allocations in order to avoid
+      // faulting potentially large numbers of pages for large secondary
+      // allocations. We assume that guard pages are enough to protect these
+      // allocations.
+      //
+      // This condition is not necessarily unlikely, but since memset is costly,
+      // we might as well mark it as such. When memory tagging is enabled,
+      // zeroing the contents is done as part of setting the tag.
+      if (UNLIKELY(useMemoryTagging())) {
+        TaggedPtr = prepareTaggedChunk(Ptr, Size,
+                                       reinterpret_cast<uptr>(Block) +
+                                           PrimaryT::getSizeByClassId(ClassId));
+      } else if (UNLIKELY(ZeroContents)) {
+        memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
+      }
+    }
+
     Header.ClassId = ClassId & Chunk::ClassIdMask;
     Header.State = Chunk::State::Allocated;
     Header.Origin = Origin & Chunk::OriginMask;
     Header.SizeOrUnusedBytes = (ClassId ? Size : BlockEnd - (UserPtr + Size)) &
                                Chunk::SizeOrUnusedBytesMask;
-    void *Ptr = reinterpret_cast<void *>(UserPtr);
     Chunk::storeHeader(Cookie, Ptr, &Header);
 
     if (&__scudo_allocate_hook)
-      __scudo_allocate_hook(Ptr, Size);
+      __scudo_allocate_hook(TaggedPtr, Size);
 
-    return Ptr;
+    return TaggedPtr;
   }
 
   NOINLINE void deallocate(void *Ptr, Chunk::Origin Origin, uptr DeleteSize = 0,
@@ -268,6 +290,8 @@
     if (UNLIKELY(!isAligned(reinterpret_cast<uptr>(Ptr), MinAlignment)))
       reportMisalignedPointer(AllocatorAction::Deallocating, Ptr);
 
+    Ptr = maybeUntagPointer(Ptr);
+
     Chunk::UnpackedHeader Header;
     Chunk::loadHeader(Cookie, Ptr, &Header);
 
@@ -295,6 +319,9 @@
   void *reallocate(void *OldPtr, uptr NewSize, uptr Alignment = MinAlignment) {
     initThreadMaybe();
 
+    void *OldTaggedPtr = OldPtr;
+    OldPtr = maybeUntagPointer(OldPtr);
+
     // The following cases are handled by the C wrappers.
     DCHECK_NE(OldPtr, nullptr);
     DCHECK_NE(NewSize, 0);
@@ -343,7 +370,9 @@
                : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
           Chunk::SizeOrUnusedBytesMask;
       Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
-      return OldPtr;
+      if (ClassId && useMemoryTagging())
+        resizeTaggedChunk(OldTaggedPtr, OldSize, NewSize, BlockEnd);
+      return OldTaggedPtr;
     }
   }
 
@@ -354,7 +383,7 @@
     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
     if (NewPtr) {
       const uptr OldSize = getSize(OldPtr, &OldHeader);
-      memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
+      memcpy(NewPtr, OldTaggedPtr, Min(NewSize, OldSize));
       quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
     }
     return NewPtr;
@@ -444,6 +473,7 @@
     initThreadMaybe();
     if (UNLIKELY(!Ptr))
       return 0;
+    Ptr = maybeUntagPointer(const_cast<void *>(Ptr));
     Chunk::UnpackedHeader Header;
     Chunk::loadHeader(Cookie, Ptr, &Header);
     // Getting the usable size of a chunk only makes sense if it's allocated.
@@ -457,6 +487,15 @@
     Stats.get(S);
   }
 
+  bool useMemoryTagging() {
+    return Primary.useMemoryTagging();
+  }
+
+  void disableMemoryTagging() {
+    if (useMemoryTagging()) disableMemoryTagChecks();
+    Primary.disableMemoryTagging();
+  }
+
 private:
   using SecondaryT = typename Params::Secondary;
   typedef typename PrimaryT::SizeClassMap SizeClassMap;
@@ -468,6 +507,9 @@
   static const uptr MaxAllowedMallocSize =
       FIRST_32_SECOND_64(1UL << 31, 1ULL << 40);
 
+  static_assert(!PrimaryT::SupportsMemoryTagging ||
+                MinAlignment >= archMemoryTagGranuleSize());
+
   // Constants used by the chunk iteration mechanism.
   static const u32 BlockMarker = 0x44554353U;
   static const uptr InvalidChunk = ~static_cast<uptr>(0);
diff --git a/compiler-rt/lib/scudo/standalone/common.h b/compiler-rt/lib/scudo/standalone/common.h
--- a/compiler-rt/lib/scudo/standalone/common.h
+++ b/compiler-rt/lib/scudo/standalone/common.h
@@ -142,6 +142,7 @@
 #define MAP_ALLOWNOMEM (1U << 0)
 #define MAP_NOACCESS (1U << 1)
 #define MAP_RESIZABLE (1U << 2)
+#define MAP_MEMTAG (1U << 3)
 
 // Our platform memory mapping use is restricted to 3 scenarios:
 // - reserve memory at a random address (MAP_NOACCESS);
diff --git a/compiler-rt/lib/scudo/standalone/linux.cpp b/compiler-rt/lib/scudo/standalone/linux.cpp
--- a/compiler-rt/lib/scudo/standalone/linux.cpp
+++ b/compiler-rt/lib/scudo/standalone/linux.cpp
@@ -35,6 +35,10 @@
 #define ANDROID_PR_SET_VMA_ANON_NAME 0
 #endif
 
+#ifdef ANDROID_EXPERIMENTAL_MTE
+#include <bionic/mte_kernel.h>
+#endif
+
 namespace scudo {
 
 uptr getPageSize() { return static_cast<uptr>(sysconf(_SC_PAGESIZE)); }
@@ -50,6 +54,10 @@
     MmapProt = PROT_NONE;
   } else {
     MmapProt = PROT_READ | PROT_WRITE;
+#if defined(__aarch64__) && defined(ANDROID_EXPERIMENTAL_MTE)
+    if (Flags & MAP_MEMTAG)
+      MmapProt |= PROT_MTE;
+#endif
   }
   if (Addr) {
     // Currently no scenario for a noaccess mapping with a fixed address.
diff --git a/compiler-rt/lib/scudo/standalone/memtag.h b/compiler-rt/lib/scudo/standalone/memtag.h
new file mode 100644
--- /dev/null
+++ b/compiler-rt/lib/scudo/standalone/memtag.h
@@ -0,0 +1,192 @@
+//===-- memtag.h ------------------------------------------------*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef SCUDO_MEMTAG_H_
+#define SCUDO_MEMTAG_H_
+
+#include "internal_defs.h"
+
+#include <sys/auxv.h>
+#if defined(ANDROID_EXPERIMENTAL_MTE)
+#include <bionic/mte_kernel.h>
+#endif
+
+namespace scudo {
+
+#if defined(__aarch64__)
+
+inline constexpr bool archSupportsMemoryTagging() { return true; }
+inline constexpr size_t archMemoryTagGranuleSize() { return 16; }
+
+#if defined(ANDROID_EXPERIMENTAL_MTE)
+
+inline bool systemSupportsMemoryTagging() {
+  return getauxval(AT_HWCAP2) & HWCAP2_MTE;
+}
+
+#else
+
+inline bool systemSupportsMemoryTagging() {
+  return false;
+}
+
+#endif
+
+inline void disableMemoryTagChecks() {
+  __asm__ __volatile__(".arch_extension mte; msr tco, #1");
+}
+
+inline void enableMemoryTagChecksTestOnly() {
+  __asm__ __volatile__(".arch_extension mte; msr tco, #0");
+}
+
+inline void *prepareTaggedChunk(void *Ptr, size_t Size, uptr BlockEnd) {
+  void *TaggedPtr, *Cur, *End;
+  __asm__ __volatile__(
+      R"(
+    .arch_extension mte
+
+    // Prepare the granule before the chunk to store the chunk header by setting
+    // its tag to 0. Normally its tag will already be 0, but in the case where a
+    // chunk holding a low alignment allocation is reused for a higher alignment
+    // allocation, the chunk may already have a non-zero tag from the previous
+    // allocation.
+    stg %[Ptr], [%[Ptr], #-16]
+
+    // Set a random tag for Ptr in TaggedPtr. This needs to happen even if
+    // Size = 0 so that TaggedPtr ends up pointing at a valid address.
+    irg %[TaggedPtr], %[Ptr]
+    mov %[Cur], %[TaggedPtr]
+
+    // Skip the loop if Size = 0. We don't want to do any tagging in this case.
+    cbz %[Size], 2f
+
+    // Set the memory tag of the region
+    // [TaggedPtr, TaggedPtr + roundUpTo(Size, 16))
+    // to the pointer tag stored in TaggedPtr.
+    add %[End], %[TaggedPtr], %[Size]
+
+  1:
+    stzg %[Cur], [%[Cur]], #16
+    cmp %[Cur], %[End]
+    b.lt 1b
+
+  2:
+    // Finally, set the tag of the granule past the end of the allocation to 0,
+    // to catch linear overflows even if a previous larger allocation used the
+    // same block and tag. Only do this if the granule past the end is in our
+    // block, because this would otherwise lead to a SEGV if the allocation
+    // covers the entire block and our block is at the end of a mapping. The tag
+    // of the next block's header granule will be set to 0, so it will serve the
+    // purpose of catching linear overflows in this case.
+    and %[Cur], %[Cur], #(1 << 56) - 1
+    cmp %[Cur], %[BlockEnd]
+    b.eq 3f
+    stg %[Cur], [%[Cur]]
+
+  3:
+  )"
+      : [ TaggedPtr ] "=&r"(TaggedPtr), [ Cur ] "=&r"(Cur), [ End ] "=&r"(End)
+      : [ Ptr ] "r"(Ptr), [ Size ] "r"(Size), [ BlockEnd ] "r"(BlockEnd));
+  return TaggedPtr;
+}
+
+inline void *untagPointer(void *Ptr) {
+  return reinterpret_cast<void *>(reinterpret_cast<uptr>(Ptr) &
+                                  ((1ULL << 56) - 1));
+}
+
+inline void resizeTaggedChunk(void *Ptr, size_t OldSize, size_t NewSize,
+                              uptr BlockEnd) {
+  size_t RoundOldSize = roundUpTo(OldSize, 16);
+  if (RoundOldSize >= NewSize) {
+    // If the allocation is shrinking we just need to set the tag past the end
+    // of the allocation to 0. See explanation in prepareTaggedChunk above.
+    size_t RoundNewSize = roundUpTo(NewSize, 16);
+    uptr UntaggedPtr = reinterpret_cast<uptr>(untagPointer(Ptr));
+    if (UntaggedPtr + RoundNewSize != BlockEnd)
+      __asm__ __volatile__(".arch_extension mte; stg %0, [%0]"
+                           :
+                           : "r"(UntaggedPtr + RoundNewSize));
+    return;
+  }
+
+  void *Cur, *End;
+  __asm__ __volatile__(R"(
+    .arch_extension mte
+
+    // Set the memory tag of the region
+    // [Ptr + roundUpTo(OldSize, 16), Ptr + roundUpTo(NewSize, 16))
+    // to the pointer tag stored in Ptr.
+    add %[Cur], %[Ptr], %[RoundOldSize]
+    add %[End], %[Ptr], %[NewSize]
+
+  1:
+    stzg %[Cur], [%[Cur]], #16
+    cmp %[Cur], %[End]
+    b.lt 1b
+
+    // Finally, set the tag of the granule past the end of the allocation to 0.
+    and %[Cur], %[Cur], #(1 << 56) - 1
+    cmp %[Cur], %[BlockEnd]
+    b.eq 2f
+    stg %[Cur], [%[Cur]]
+
+  2:
+  )"
+      : [ Cur ] "=&r"(Cur), [ End ] "=&r"(End)
+      : [ Ptr ] "r"(Ptr), [ RoundOldSize ] "r"(RoundOldSize),
+        [ NewSize ] "r"(NewSize), [ BlockEnd ] "r"(BlockEnd));
+}
+
+#else
+
+inline constexpr bool archSupportsMemoryTagging() { return false; }
+
+inline bool systemSupportsMemoryTagging() {
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline size_t archMemoryTagGranuleSize() {
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline void disableMemoryTagChecks() {
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline void enableMemoryTagChecksTestOnly() {
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline void *prepareTaggedChunk(void *Ptr, size_t Size, uptr BlockEnd) {
+  (void)Ptr;
+  (void)Size;
+  (void)BlockEnd;
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline void resizeTaggedChunk(void *Ptr, size_t OldSize, size_t NewSize,
+                              uptr BlockEnd) {
+  (void)Ptr;
+  (void)OldSize;
+  (void)NewSize;
+  (void)BlockEnd;
+  UNREACHABLE("memory tagging not supported");
+}
+
+inline void *untagPointer(void *Ptr) {
+  (void)Ptr;
+  UNREACHABLE("memory tagging not supported");
+}
+
+#endif
+
+}
+
+#endif
diff --git a/compiler-rt/lib/scudo/standalone/primary32.h b/compiler-rt/lib/scudo/standalone/primary32.h
--- a/compiler-rt/lib/scudo/standalone/primary32.h
+++ b/compiler-rt/lib/scudo/standalone/primary32.h
@@ -46,6 +46,7 @@
   typedef SizeClassAllocator32<SizeClassMapT, RegionSizeLog> ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
+  static const bool SupportsMemoryTagging = false;
 
   static uptr getSizeByClassId(uptr ClassId) {
     return (ClassId == SizeClassMap::BatchClassId)
@@ -173,6 +174,9 @@
     return TotalReleasedBytes;
   }
 
+  bool useMemoryTagging() { return false; }
+  void disableMemoryTagging() {}
+
 private:
   static const uptr NumClasses = SizeClassMap::NumClasses;
   static const uptr RegionSize = 1UL << RegionSizeLog;
diff --git a/compiler-rt/lib/scudo/standalone/primary64.h b/compiler-rt/lib/scudo/standalone/primary64.h
--- a/compiler-rt/lib/scudo/standalone/primary64.h
+++ b/compiler-rt/lib/scudo/standalone/primary64.h
@@ -13,6 +13,7 @@
 #include "common.h"
 #include "list.h"
 #include "local_cache.h"
+#include "memtag.h"
 #include "release.h"
 #include "stats.h"
 #include "string_utils.h"
@@ -38,12 +39,18 @@
 // The memory used by this allocator is never unmapped, but can be partially
 // released if the platform allows for it.
-template <class SizeClassMapT, uptr RegionSizeLog> class SizeClassAllocator64 {
-public:
+template <class SizeClassMapT, uptr RegionSizeLog,
+          bool MaySupportMemoryTagging = false>
+class SizeClassAllocator64 {
+public:
   typedef SizeClassMapT SizeClassMap;
-  typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog> ThisT;
+  typedef SizeClassAllocator64<SizeClassMap, RegionSizeLog,
+                               MaySupportMemoryTagging>
+      ThisT;
   typedef SizeClassAllocatorLocalCache<ThisT> CacheT;
   typedef typename CacheT::TransferBatch TransferBatch;
+  static const bool SupportsMemoryTagging =
+      MaySupportMemoryTagging && archSupportsMemoryTagging();
 
   static uptr getSizeByClassId(uptr ClassId) {
     return (ClassId == SizeClassMap::BatchClassId)
@@ -85,6 +92,9 @@
       Region->RandState = getRandomU32(&Seed);
     }
     ReleaseToOsIntervalMs = ReleaseToOsInterval;
+
+    if (SupportsMemoryTagging)
+      UseMemoryTagging = systemSupportsMemoryTagging();
   }
   void init(s32 ReleaseToOsInterval) {
     memset(this, 0, sizeof(*this));
@@ -180,6 +190,9 @@
     return TotalReleasedBytes;
   }
 
+  bool useMemoryTagging() const { return SupportsMemoryTagging && UseMemoryTagging; }
+  void disableMemoryTagging() { UseMemoryTagging = false; }
+
 private:
   static const uptr RegionSize = 1UL << RegionSizeLog;
   static const uptr NumClasses = SizeClassMap::NumClasses;
@@ -221,6 +234,7 @@
   RegionInfo *RegionInfoArray;
   MapPlatformData Data;
   s32 ReleaseToOsIntervalMs;
+  bool UseMemoryTagging;
 
   RegionInfo *getRegionInfo(uptr ClassId) const {
     DCHECK_LT(ClassId, NumClasses);
@@ -285,12 +299,13 @@
       Region->Data = Data;
       if (UNLIKELY(!map(reinterpret_cast<void *>(RegionBeg + MappedUser),
                         UserMapSize, "scudo:primary",
-                        MAP_ALLOWNOMEM | MAP_RESIZABLE, &Region->Data)))
+                        MAP_ALLOWNOMEM | MAP_RESIZABLE |
+                            (useMemoryTagging() ? MAP_MEMTAG : 0),
+                        &Region->Data)))
         return nullptr;
       Region->MappedUser += UserMapSize;
       C->getStats().add(StatMapped, UserMapSize);
     }
-
     const u32 NumberOfBlocks = Min(
         MaxNumBatches * MaxCount,
         static_cast<u32>((Region->MappedUser - Region->AllocatedUser) / Size));
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -32,6 +32,31 @@
          "quarantine_max_chunk_size=1024";
 }
 
+template <class AllocatorT>
+void maybeCheckMemoryTagging(AllocatorT *Allocator, void *P, size_t Size) {
+  size_t NeededSize = scudo::roundUpTo(Size, 1UL << SCUDO_MIN_ALIGNMENT_LOG) +
+                      scudo::Chunk::getHeaderSize();
+  if (!Allocator->useMemoryTagging() ||
+      !AllocatorT::PrimaryT::canAllocate(NeededSize))
+    return;
+
+  Size = scudo::roundUpTo(Size, scudo::archMemoryTagGranuleSize());
+  EXPECT_DEATH({
+#if SCUDO_ANDROID
+    // Disable the debuggerd signal handler on Android, without this we can end
+    // up spending a significant amount of time creating tombstones.
+    signal(SIGSEGV, SIG_DFL);
+#endif
+    reinterpret_cast<char *>(P)[-1] = 0xaa;
+  }, "");
+  EXPECT_DEATH({
+#if SCUDO_ANDROID
+    signal(SIGSEGV, SIG_DFL);
+#endif
+    reinterpret_cast<char *>(P)[Size] = 0xaa;
+  }, "");
+}
+
 template <class Config> static void testAllocator() {
   using AllocatorT = scudo::Allocator<Config>;
   auto Deleter = [](AllocatorT *A) {
@@ -59,6 +84,7 @@
     EXPECT_TRUE(scudo::isAligned(reinterpret_cast<scudo::uptr>(P), Align));
     EXPECT_LE(Size, Allocator->getUsableSize(P));
     memset(P, 0xaa, Size);
+    maybeCheckMemoryTagging(Allocator.get(), P, Size);
     Allocator->deallocate(P, Origin, Size);
   }
 }
@@ -86,7 +112,8 @@
   bool Found = false;
   for (scudo::uptr I = 0; I < 1024U && !Found; I++) {
     void *P = Allocator->allocate(NeedleSize, Origin);
-    if (P == NeedleP)
+    if (Allocator->maybeUntagPointer(P) ==
+        Allocator->maybeUntagPointer(NeedleP))
       Found = true;
     Allocator->deallocate(P, Origin);
   }
@@ -123,6 +150,7 @@
     EXPECT_EQ(NewP, P);
     for (scudo::uptr I = 0; I < scudo::Min(DataSize, NewSize); I++)
       EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
+    maybeCheckMemoryTagging(Allocator.get(), NewP, NewSize);
   }
   Allocator->deallocate(P, Origin);
@@ -151,6 +179,26 @@
   Allocator->releaseToOS();
 
+  // Check that disabling memory tagging works correctly.
+  if (Allocator->useMemoryTagging()) {
+    void *P = Allocator->allocate(2048, Origin);
+    EXPECT_DEATH(reinterpret_cast<char *>(P)[2048] = 0xaa, "");
+    Allocator->disableMemoryTagging();
+    reinterpret_cast<char *>(P)[2048] = 0xaa;
+    Allocator->deallocate(P, Origin);
+
+    P = Allocator->allocate(2048, Origin);
+    EXPECT_EQ(Allocator->maybeUntagPointer(P), P);
+    reinterpret_cast<char *>(P)[2048] = 0xaa;
+    Allocator->deallocate(P, Origin);
+
+    Allocator->releaseToOS();
+
+    // The allocator may have disabled memory tag checks globally, which may
+    // interfere with subsequent tests. Re-enable them now.
+    scudo::enableMemoryTagChecksTestOnly();
+  }
+
   scudo::uptr BufferSize = 8192;
   std::vector<char> Buffer(BufferSize);
   scudo::uptr ActualSize = Allocator->getStats(Buffer.data(), BufferSize);
diff --git a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/primary_test.cpp
@@ -54,13 +54,15 @@
 TEST(ScudoPrimaryTest, BasicPrimary) {
   using SizeClassMap = scudo::DefaultSizeClassMap;
   testPrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
-  testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+  testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, false>>();
+  testPrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>();
 }
 
 // The 64-bit SizeClassAllocator can be easily OOM'd with small region sizes.
 // For the 32-bit one, it requires actually exhausting memory, so we skip it.
 TEST(ScudoPrimaryTest, Primary64OOM) {
-  using Primary = scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 20U>;
+  using Primary =
+      scudo::SizeClassAllocator64<scudo::DefaultSizeClassMap, 20U, false>;
   using TransferBatch = Primary::CacheT::TransferBatch;
   Primary Allocator;
   Allocator.init(/*ReleaseToOsInterval=*/-1);
@@ -137,7 +139,8 @@
 TEST(ScudoPrimaryTest, PrimaryIterate) {
   using SizeClassMap = scudo::DefaultSizeClassMap;
   testIteratePrimary<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
-  testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+  testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, false>>();
+  testIteratePrimary<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>();
 }
 
 static std::mutex Mutex;
@@ -194,7 +197,8 @@
 TEST(ScudoPrimaryTest, PrimaryThreaded) {
   using SizeClassMap = scudo::SvelteSizeClassMap;
   testPrimaryThreaded<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
-  testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+  testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U, false>>();
+  testPrimaryThreaded<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>();
 }
 
 // Through a simple allocation that spans two pages, verify that releaseToOS
@@ -222,5 +226,6 @@
 TEST(ScudoPrimaryTest, ReleaseToOS) {
   using SizeClassMap = scudo::DefaultSizeClassMap;
   testReleaseToOS<scudo::SizeClassAllocator32<SizeClassMap, 18U>>();
-  testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U>>();
+  testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U, false>>();
+  testReleaseToOS<scudo::SizeClassAllocator64<SizeClassMap, 24U, true>>();
 }
diff --git a/compiler-rt/lib/scudo/standalone/wrappers_c.inc b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
--- a/compiler-rt/lib/scudo/standalone/wrappers_c.inc
+++ b/compiler-rt/lib/scudo/standalone/wrappers_c.inc
@@ -176,3 +176,7 @@
   fputs("</malloc>", stream);
   return 0;
 }
+
+INTERFACE WEAK void SCUDO_PREFIX(malloc_disable_memory_tagging)() {
+  SCUDO_ALLOCATOR.disableMemoryTagging();
+}
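Two brief usage sketches follow; they are not part of the patch above and the names they introduce are illustrative. The first shows how a caller might drive the new weak `SCUDO_PREFIX(malloc_disable_memory_tagging)` wrapper, which forwards to `Allocator::disableMemoryTagging()` (stop tag checks globally and stop tagging newly mapped Primary regions). The exported symbol name depends on which wrapper translation unit is built; a `scudo_` prefix, as used by the bionic-style wrappers, is assumed here.

// Caller-side sketch (hypothetical). Declaring the symbol weak lets the code
// link against an allocator built without this interface.
extern "C" void scudo_malloc_disable_memory_tagging() __attribute__((weak));

static void maybeOptOutOfHeapTagging(bool ProcessOptedOut) {
  // E.g. a process spawner could call this before the program starts touching
  // the heap, for binaries known to be incompatible with MTE.
  if (ProcessOptedOut && &scudo_malloc_disable_memory_tagging)
    scudo_malloc_disable_memory_tagging();
}

The second sketch is a plain C++ model of the address arithmetic performed by the `prepareTaggedChunk` assembly in memtag.h (no MTE instructions are issued and the struct/function names are invented): tags are applied in 16-byte granules, the granule holding the chunk header keeps tag 0, the tagged range is the requested size rounded up to a granule, and the granule just past the allocation is reset to tag 0 unless it coincides with the block end.

#include <cstdint>

constexpr uintptr_t GranuleSize = 16; // archMemoryTagGranuleSize()

struct TagPlan {
  uintptr_t HeaderGranule;  // granule before the chunk (holds the header);
                            // its tag is set to 0
  uintptr_t FirstTagged;    // first byte covered by the random pointer tag
  uintptr_t EndTagged;      // one past the last tagged byte (Size rounded up)
  bool ZeroGranulePastEnd;  // re-tag [EndTagged, EndTagged + 16) to 0?
};

inline TagPlan planTags(uintptr_t UntaggedPtr, uintptr_t Size,
                        uintptr_t BlockEnd) {
  TagPlan Plan;
  Plan.HeaderGranule = UntaggedPtr - GranuleSize;
  Plan.FirstTagged = UntaggedPtr;
  Plan.EndTagged =
      UntaggedPtr + ((Size + GranuleSize - 1) & ~(GranuleSize - 1));
  // Skipped when the tagged range ends exactly at the block end, to avoid
  // touching memory past the mapping; the next block's header granule is
  // already tag 0 and catches linear overflows in that case.
  Plan.ZeroGranulePastEnd = Plan.EndTagged != BlockEnd;
  return Plan;
}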