Index: compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h =================================================================== --- compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h +++ compiler-rt/trunk/lib/sanitizer_common/sanitizer_allocator_combined.h @@ -49,16 +49,29 @@ size = 1; if (size + alignment < size) return ReturnNullOrDieOnBadRequest(); if (check_rss_limit && RssLimitIsExceeded()) return ReturnNullOrDieOnOOM(); + uptr original_size = size; + // If alignment requirements are to be fulfilled by the frontend allocator + // rather than by the primary or secondary, passing an alignment lower than + // or equal to 8 will prevent any further rounding up, as well as the later + // alignment check. if (alignment > 8) size = RoundUpTo(size, alignment); void *res; bool from_primary = primary_.CanAllocate(size, alignment); + // The primary allocator should return a 2^x aligned allocation when + // requested 2^x bytes, hence using the rounded up 'size' when being + // serviced by the primary. The secondary takes care of the alignment + // without such requirement, and allocating 'size' would use extraneous + // memory, so we employ 'original_size'. if (from_primary) res = cache->Allocate(&primary_, primary_.ClassID(size)); else - res = secondary_.Allocate(&stats_, size, alignment); + res = secondary_.Allocate(&stats_, original_size, alignment); if (alignment > 8) CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0); + // When serviced by the secondary, the chunk comes from a mmap allocation + // and will be zero'd out anyway. We only need to clear out the chunk if + // it was serviced by the primary, hence using the rounded up 'size'. 
if (cleared && res && from_primary) internal_bzero_aligned16(res, RoundUpTo(size, 16)); return res; Index: compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h =================================================================== --- compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h +++ compiler-rt/trunk/lib/scudo/scudo_allocator_secondary.h @@ -46,7 +46,7 @@ uptr UserBeg = MapBeg + PageSize + HeadersSize; // In the event of larger alignments, we will attempt to fit the mmap area // better and unmap extraneous memory. This will also ensure that the - // offset field of the header stays small (it will always be 0). + // offset and unused bytes fields of the header stay small. if (Alignment > MinAlignment) { if (UserBeg & (Alignment - 1)) UserBeg += Alignment - (UserBeg & (Alignment - 1)); @@ -54,8 +54,9 @@ uptr NewMapBeg = UserBeg - HeadersSize; NewMapBeg = RoundDownTo(NewMapBeg, PageSize) - PageSize; CHECK_GE(NewMapBeg, MapBeg); - uptr NewMapSize = RoundUpTo(MapSize - Alignment, PageSize); - uptr NewMapEnd = NewMapBeg + NewMapSize; + uptr NewMapEnd = + RoundUpTo(UserBeg + Size - Alignment - AlignedChunkHeaderSize, + PageSize) + PageSize; CHECK_LE(NewMapEnd, MapEnd); // Unmap the extra memory if it's large enough. uptr Diff = NewMapBeg - MapBeg; @@ -65,8 +66,8 @@ if (Diff > PageSize) UnmapOrDie(reinterpret_cast<void *>(NewMapEnd), Diff); MapBeg = NewMapBeg; - MapSize = NewMapSize; MapEnd = NewMapEnd; + MapSize = NewMapEnd - NewMapBeg; } uptr UserEnd = UserBeg - AlignedChunkHeaderSize + Size; // For larger alignments, Alignment was added by the frontend to Size. Index: compiler-rt/trunk/lib/scudo/scudo_utils.cpp =================================================================== --- compiler-rt/trunk/lib/scudo/scudo_utils.cpp +++ compiler-rt/trunk/lib/scudo/scudo_utils.cpp @@ -17,7 +17,9 @@ #include #include #include -#include <cpuid.h> +#if defined(__x86_64__) || defined(__i386__) +# include <cpuid.h> +#endif #include