Index: compiler-rt/trunk/lib/scudo/standalone/allocator_config.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/allocator_config.h
+++ compiler-rt/trunk/lib/scudo/standalone/allocator_config.h
@@ -53,8 +53,8 @@
   // 512MB regions
   typedef SizeClassAllocator64<SizeClassMap, 29U> Primary;
 #else
-  // 256KB regions
-  typedef SizeClassAllocator32<SizeClassMap, 18U> Primary;
+  // 64KB regions
+  typedef SizeClassAllocator32<SizeClassMap, 16U> Primary;
 #endif
   template <class A>
   using TSDRegistryT = TSDRegistrySharedT<A, 1U>; // Shared, only 1 TSD.
Index: compiler-rt/trunk/lib/scudo/standalone/combined.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/combined.h
+++ compiler-rt/trunk/lib/scudo/standalone/combined.h
@@ -311,18 +311,30 @@
                               OldHeader.Origin, Chunk::Origin::Malloc);
     }
 
-    const uptr OldSize = getSize(OldPtr, &OldHeader);
-    // If the new size is identical to the old one, or lower but within an
-    // acceptable range, we just keep the old chunk, and update its header.
-    if (UNLIKELY(NewSize == OldSize))
-      return OldPtr;
-    if (NewSize < OldSize) {
-      const uptr Delta = OldSize - NewSize;
-      if (Delta < (SizeClassMap::MaxSize / 2)) {
+    void *BlockBegin = getBlockBegin(OldPtr, &OldHeader);
+    uptr BlockEnd;
+    uptr OldSize;
+    const uptr ClassId = OldHeader.ClassId;
+    if (LIKELY(ClassId)) {
+      BlockEnd = reinterpret_cast<uptr>(BlockBegin) +
+                 SizeClassMap::getSizeByClassId(ClassId);
+      OldSize = OldHeader.SizeOrUnusedBytes;
+    } else {
+      BlockEnd = SecondaryT::getBlockEnd(BlockBegin);
+      OldSize = BlockEnd -
+                (reinterpret_cast<uptr>(OldPtr) + OldHeader.SizeOrUnusedBytes);
+    }
+    // If the new chunk still fits in the previously allocated block (with a
+    // reasonable delta), we just keep the old block, and update the chunk
+    // header to reflect the size change.
+    if (reinterpret_cast<uptr>(OldPtr) + NewSize <= BlockEnd) {
+      const uptr Delta =
+          OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
+      if (Delta <= SizeClassMap::MaxSize / 2) {
         Chunk::UnpackedHeader NewHeader = OldHeader;
         NewHeader.SizeOrUnusedBytes =
-            (OldHeader.ClassId ? NewHeader.SizeOrUnusedBytes - Delta
-                               : NewHeader.SizeOrUnusedBytes + Delta) &
+            (ClassId ? NewSize
+                     : BlockEnd - (reinterpret_cast<uptr>(OldPtr) + NewSize)) &
             Chunk::SizeOrUnusedBytesMask;
         Chunk::compareExchangeHeader(Cookie, OldPtr, &NewHeader, &OldHeader);
         return OldPtr;
@@ -335,6 +347,7 @@
     // are currently unclear.
     void *NewPtr = allocate(NewSize, Chunk::Origin::Malloc, Alignment);
     if (NewPtr) {
+      const uptr OldSize = getSize(OldPtr, &OldHeader);
       memcpy(NewPtr, OldPtr, Min(NewSize, OldSize));
       quarantineOrDeallocateChunk(OldPtr, &OldHeader, OldSize);
     }
Index: compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
+++ compiler-rt/trunk/lib/scudo/standalone/size_class_map.h
@@ -137,11 +137,11 @@
 // TODO(kostyak): further tune class maps for Android & Fuchsia.
 #if SCUDO_WORDSIZE == 64U
-typedef SizeClassMap<3, 5, 8, 15, 8, 10> SvelteSizeClassMap;
+typedef SizeClassMap<4, 4, 8, 14, 4, 10> SvelteSizeClassMap;
 typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
 #else
-typedef SizeClassMap<3, 4, 7, 15, 8, 10> SvelteSizeClassMap;
-typedef SizeClassMap<3, 4, 7, 17, 14, 14> AndroidSizeClassMap;
+typedef SizeClassMap<4, 3, 7, 14, 5, 10> SvelteSizeClassMap;
+typedef SizeClassMap<3, 5, 8, 17, 14, 14> AndroidSizeClassMap;
 #endif
 
 } // namespace scudo
 
Index: compiler-rt/trunk/lib/scudo/standalone/tests/combined_test.cpp
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/combined_test.cpp
+++ compiler-rt/trunk/lib/scudo/standalone/tests/combined_test.cpp
@@ -97,6 +97,21 @@
   }
   Allocator->deallocate(P, Origin);
 
+  // Check that reallocating a chunk to a slightly smaller or larger size
+  // returns the same chunk. This requires that all the sizes we iterate on use
+  // the same block size, but that should be the case for 2048 with our default
+  // class size maps.
+  P = Allocator->allocate(DataSize, Origin);
+  memset(P, Marker, DataSize);
+  for (scudo::sptr Delta = -32; Delta < 32; Delta += 8) {
+    const scudo::uptr NewSize = DataSize + Delta;
+    void *NewP = Allocator->reallocate(P, NewSize);
+    EXPECT_EQ(NewP, P);
+    for (scudo::uptr I = 0; I < scudo::Min(DataSize, NewSize); I++)
+      EXPECT_EQ((reinterpret_cast<char *>(NewP))[I], Marker);
+  }
+  Allocator->deallocate(P, Origin);
+
   // Allocates a bunch of chunks, then iterate over all the chunks, ensuring
   // they are the ones we allocated. This requires the allocator to not have any
   // other allocated chunk at this point (eg: won't work with the Quarantine).
Index: compiler-rt/trunk/lib/scudo/standalone/tests/wrappers_c_test.cpp
===================================================================
--- compiler-rt/trunk/lib/scudo/standalone/tests/wrappers_c_test.cpp
+++ compiler-rt/trunk/lib/scudo/standalone/tests/wrappers_c_test.cpp
@@ -12,6 +12,7 @@
 
 #include <limits.h>
 #include <malloc.h>
+#include <string.h>
 #include <unistd.h>
 
 extern "C" {
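
For context, the new in-place path in combined.h reduces to straightforward pointer arithmetic: realloc keeps the old block whenever the new user data still ends before the block's end and the size delta is moderate. A minimal standalone sketch of that check follows; canReallocInPlace and the MaxSize constant are illustrative stand-ins, not Scudo's API, and the real code derives BlockEnd from the primary's class size or the secondary header as shown in the hunk above.

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// Stand-in for SizeClassMap::MaxSize; the real value depends on the
// configured class map (e.g. on the order of 1 << 14 for the new
// 32-bit Svelte map, whose MaxSizeLog is 14).
constexpr uptr MaxSize = 1 << 14;

// A realloc may keep the old block if the new user data still ends within
// the block, and the size delta stays within MaxSize / 2.
bool canReallocInPlace(uptr OldPtr, uptr OldSize, uptr NewSize, uptr BlockEnd) {
  if (OldPtr + NewSize > BlockEnd)
    return false;
  const uptr Delta = OldSize < NewSize ? NewSize - OldSize : OldSize - NewSize;
  return Delta <= MaxSize / 2;
}

int main() {
  // A hypothetical 2048-byte block whose user data starts 32 bytes in.
  const uptr BlockBegin = 0x100000;
  const uptr BlockEnd = BlockBegin + 2048;
  const uptr OldPtr = BlockBegin + 32;
  printf("1024 -> 1500: %d\n", canReallocInPlace(OldPtr, 1024, 1500, BlockEnd)); // fits
  printf("1024 -> 2040: %d\n", canReallocInPlace(OldPtr, 1024, 2040, BlockEnd)); // overflows
  return 0;
}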
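
The header update in the same hunk stores different quantities depending on the backing allocator: primary-backed chunks record the requested size itself, while secondary-backed chunks record the bytes left unused after the user data. A hedged restatement, with a hypothetical helper name and the Chunk::SizeOrUnusedBytesMask masking omitted:

#include <cstdint>
#include <cstdio>

using uptr = uintptr_t;

// ClassId != 0: primary chunk, store the new requested size.
// ClassId == 0: secondary chunk, store the unused bytes past the user data.
uptr newSizeOrUnusedBytes(uptr ClassId, uptr Ptr, uptr NewSize, uptr BlockEnd) {
  return ClassId ? NewSize : BlockEnd - (Ptr + NewSize);
}

int main() {
  const uptr Ptr = 0x200000;           // hypothetical user pointer
  const uptr BlockEnd = Ptr + 0x10000; // hypothetical secondary block end
  // Shrinking a secondary chunk to 0x8000 bytes leaves 0x8000 bytes unused.
  printf("secondary: 0x%zx\n",
         static_cast<size_t>(newSizeOrUnusedBytes(0, Ptr, 0x8000, BlockEnd)));
  // A primary chunk simply tracks the new requested size.
  printf("primary: %zu\n",
         static_cast<size_t>(newSizeOrUnusedBytes(3, Ptr, 100, BlockEnd)));
  return 0;
}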