diff --git a/compiler-rt/lib/scudo/standalone/combined.h b/compiler-rt/lib/scudo/standalone/combined.h
--- a/compiler-rt/lib/scudo/standalone/combined.h
+++ b/compiler-rt/lib/scudo/standalone/combined.h
@@ -161,6 +161,7 @@
                           uptr Alignment = MinAlignment,
                           bool ZeroContents = false) {
     initThreadMaybe();
+    ZeroContents = ZeroContents || Options.ZeroContents;
 
     if (UNLIKELY(Alignment > MaxAlignment)) {
       if (Options.MayReturnNull)
@@ -200,7 +201,8 @@
         TSD->unlock();
     } else {
       ClassId = 0;
-      Block = Secondary.allocate(NeededSize, Alignment, &BlockEnd);
+      Block =
+          Secondary.allocate(NeededSize, Alignment, &BlockEnd, ZeroContents);
     }
 
     if (UNLIKELY(!Block)) {
@@ -212,7 +214,7 @@
     // We only need to zero the contents for Primary backed allocations. This
     // condition is not necessarily unlikely, but since memset is costly, we
     // might as well mark it as such.
-    if (UNLIKELY((ZeroContents || Options.ZeroContents) && ClassId))
+    if (UNLIKELY(ZeroContents && ClassId))
       memset(Block, 0, PrimaryT::getSizeByClassId(ClassId));
 
     Chunk::UnpackedHeader Header = {};
diff --git a/compiler-rt/lib/scudo/standalone/secondary.h b/compiler-rt/lib/scudo/standalone/secondary.h
--- a/compiler-rt/lib/scudo/standalone/secondary.h
+++ b/compiler-rt/lib/scudo/standalone/secondary.h
@@ -60,7 +60,8 @@
     initLinkerInitialized(S);
   }
 
-  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr);
+  void *allocate(uptr Size, uptr AlignmentHint = 0, uptr *BlockEnd = nullptr,
+                 bool ZeroContents = false);
 
   void deallocate(void *Ptr);
 
@@ -111,7 +112,8 @@
 // (pending rounding and headers).
 template <uptr MaxFreeListSize>
 void *MapAllocator<MaxFreeListSize>::allocate(uptr Size, uptr AlignmentHint,
-                                              uptr *BlockEnd) {
+                                              uptr *BlockEnd,
+                                              bool ZeroContents) {
   DCHECK_GT(Size, AlignmentHint);
   const uptr PageSize = getPageSizeCached();
   const uptr RoundedSize =
@@ -133,8 +135,11 @@
       Stats.add(StatAllocated, FreeBlockSize);
       if (BlockEnd)
         *BlockEnd = H.BlockEnd;
-      return reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
-                                      LargeBlock::getHeaderSize());
+      void *Ptr = reinterpret_cast<void *>(reinterpret_cast<uptr>(&H) +
+                                           LargeBlock::getHeaderSize());
+      if (ZeroContents)
+        memset(Ptr, 0, H.BlockEnd - reinterpret_cast<uptr>(Ptr));
+      return Ptr;
     }
   }
 
diff --git a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
--- a/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
+++ b/compiler-rt/lib/scudo/standalone/tests/combined_test.cpp
@@ -65,6 +65,20 @@
   }
   Allocator->releaseToOS();
 
+  // Ensure that specifying ZeroContents returns a zero'd out block.
+  for (scudo::uptr SizeLog = 0U; SizeLog <= 20U; SizeLog++) {
+    for (scudo::uptr Delta = 0U; Delta <= 4U; Delta++) {
+      const scudo::uptr Size = (1U << SizeLog) + Delta * 128U;
+      void *P = Allocator->allocate(Size, Origin, 1U << MinAlignLog, true);
+      EXPECT_NE(P, nullptr);
+      for (scudo::uptr I = 0; I < Size; I++)
+        EXPECT_EQ((reinterpret_cast<char *>(P))[I], 0);
+      memset(P, 0xaa, Size);
+      Allocator->deallocate(P, Origin, Size);
+    }
+  }
+  Allocator->releaseToOS();
+
   // Verify that a chunk will end up being reused, at some point.
   const scudo::uptr NeedleSize = 1024U;
   void *NeedleP = Allocator->allocate(NeedleSize, Origin);