diff --git a/compiler-rt/lib/memprof/memprof_rawprofile.cpp b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
--- a/compiler-rt/lib/memprof/memprof_rawprofile.cpp
+++ b/compiler-rt/lib/memprof/memprof_rawprofile.cpp
@@ -6,6 +6,7 @@
 #include "memprof_rawprofile.h"
 #include "profile/MemProfData.inc"
 #include "sanitizer_common/sanitizer_allocator_internal.h"
+#include "sanitizer_common/sanitizer_common.h"
 #include "sanitizer_common/sanitizer_linux.h"
 #include "sanitizer_common/sanitizer_procmaps.h"
 #include "sanitizer_common/sanitizer_stackdepot.h"
@@ -77,7 +78,7 @@
   // Store the number of segments we recorded in the space we reserved.
   *((u64 *)Buffer) = NumSegmentsRecorded;
-  CHECK(ExpectedNumBytes == static_cast<u64>(Ptr - Buffer) &&
+  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
         "Expected num bytes != actual bytes written");
 }
@@ -132,7 +133,7 @@
     *(u64 *)(Ptr - (Count + 1) * sizeof(u64)) = Count;
   }
 
-  CHECK(ExpectedNumBytes == static_cast<u64>(Ptr - Buffer) &&
+  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
         "Expected num bytes != actual bytes written");
 }
@@ -160,7 +161,7 @@
     Ptr = WriteBytes((*h)->mib, Ptr);
   }
 
-  CHECK(ExpectedNumBytes == static_cast<u64>(Ptr - Buffer) &&
+  CHECK(ExpectedNumBytes >= static_cast<u64>(Ptr - Buffer) &&
         "Expected num bytes != actual bytes written");
 }
@@ -181,11 +182,15 @@
 // BuildID 32B
 // ----------
 // ...
+// ----------
+// Optional Padding Bytes
 // ---------- MIB Info
 // Num Entries
 // ---------- MIB Entry
 // Alloc Count
 // ...
+// ----------
+// Optional Padding Bytes
 // ---------- Stack Info
 // Num Entries
 // ---------- Stack Entry
@@ -194,23 +199,29 @@
 // PC2
 // ...
 // ----------
+// Optional Padding Bytes
 // ...
 u64 SerializeToRawProfile(MIBMapTy &MIBMap, MemoryMappingLayoutBase &Layout,
                           char *&Buffer) {
-  const u64 NumSegmentBytes = SegmentSizeBytes(Layout);
+  // Each section size is rounded up to 8b since the first entry in each section
+  // is a u64 which holds the number of entries in the section by convention.
+  const u64 NumSegmentBytes = RoundUpTo(SegmentSizeBytes(Layout), 8);
 
   Vector<u64> StackIds;
   MIBMap.ForEach(RecordStackId, reinterpret_cast<void *>(&StackIds));
   // The first 8b are for the total number of MIB records. Each MIB record is
   // preceded by a 8b stack id which is associated with stack frames in the next
   // section.
-  const u64 NumMIBInfoBytes =
-      sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock));
+  const u64 NumMIBInfoBytes = RoundUpTo(
+      sizeof(u64) + StackIds.Size() * (sizeof(u64) + sizeof(MemInfoBlock)), 8);
 
-  const u64 NumStackBytes = StackSizeBytes(StackIds);
+  const u64 NumStackBytes = RoundUpTo(StackSizeBytes(StackIds), 8);
 
-  const u64 TotalSizeBytes =
-      sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes;
+  // Ensure that the profile is 8b aligned. We allow for some optional padding
+  // at the end so that any subsequent profile serialized to the same file does
+  // not incur unaligned accesses.
+  const u64 TotalSizeBytes = RoundUpTo(
+      sizeof(Header) + NumSegmentBytes + NumStackBytes + NumMIBInfoBytes, 8);
 
   // Allocate the memory for the entire buffer incl. info blocks.
   Buffer = (char *)InternalAlloc(TotalSizeBytes);
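Note (illustration only, not part of the patch): the section sizes above are padded with sanitizer_common's RoundUpTo. Below is a minimal standalone sketch of that rounding, using a hypothetical roundUpTo8 helper assumed to behave like RoundUpTo(Size, 8).

#include <cassert>
#include <cstdint>

// Hypothetical stand-in for RoundUpTo(Size, 8): rounds Size up to the next
// multiple of 8 so that the u64 entry count which starts the following
// section is naturally aligned.
static inline uint64_t roundUpTo8(uint64_t Size) {
  return (Size + 7) & ~static_cast<uint64_t>(7);
}

int main() {
  // Already-aligned sizes are unchanged; others gain up to 7 padding bytes.
  assert(roundUpTo8(0) == 0);
  assert(roundUpTo8(1) == 8);
  assert(roundUpTo8(48) == 48);
  assert(roundUpTo8(50) == 56);
  return 0;
}

Because every section (and the total profile size) becomes a multiple of 8, the u64 counts at the head of each section, and of any profile appended later to the same file, can be read with plain aligned loads.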
diff --git a/compiler-rt/lib/memprof/tests/rawprofile.cpp b/compiler-rt/lib/memprof/tests/rawprofile.cpp
--- a/compiler-rt/lib/memprof/tests/rawprofile.cpp
+++ b/compiler-rt/lib/memprof/tests/rawprofile.cpp
@@ -49,6 +49,8 @@
 template <class T = u64> T Read(char *&Buffer) {
   static_assert(std::is_pod<T>::value, "Must be a POD type.");
+  assert(reinterpret_cast<uintptr_t>(Buffer) % sizeof(T) == 0 &&
+         "Unaligned read!");
   T t = *reinterpret_cast<T *>(Buffer);
   Buffer += sizeof(T);
   return t;
 }
@@ -103,8 +105,9 @@
   const u64 MIBOffset = Read(Ptr);
   const u64 StackOffset = Read(Ptr);
 
-  // ============= Check sizes.
+  // ============= Check sizes and padding.
   EXPECT_EQ(TotalSize, NumBytes);
+  EXPECT_EQ(TotalSize % 8, 0ULL);
 
   // Should be equal to the size of the raw profile header.
   EXPECT_EQ(SegmentOffset, 48ULL);
@@ -120,8 +123,10 @@
   EXPECT_EQ(StackOffset, 336ULL);
 
   // We expect 2 stack entries, with 5 frames - 8b for total count,
-  // 2 * (8b for id, 8b for frame count and 5*8b for fake frames)
-  EXPECT_EQ(TotalSize - StackOffset, 8ULL + 2 * (8 + 8 + 5 * 8));
+  // 2 * (8b for id, 8b for frame count and 5*8b for fake frames).
+  // Since this is the last section, there may be additional padding at the end
+  // to make the total profile size 8b aligned.
+  EXPECT_GE(TotalSize - StackOffset, 8ULL + 2 * (8 + 8 + 5 * 8));
 
   // ============= Check contents.
   unsigned char ExpectedSegmentBytes[64] = {
diff --git a/llvm/lib/ProfileData/RawMemProfReader.cpp b/llvm/lib/ProfileData/RawMemProfReader.cpp
--- a/llvm/lib/ProfileData/RawMemProfReader.cpp
+++ b/llvm/lib/ProfileData/RawMemProfReader.cpp
@@ -11,6 +11,7 @@
 //===----------------------------------------------------------------------===//
 
 #include <cstdint>
+#include <type_traits>
 
 #include "llvm/ProfileData/InstrProf.h"
 #include "llvm/ProfileData/MemProfData.inc"
@@ -28,15 +29,22 @@
   uint64_t NumStackOffsets;
 };
 
+template <class T = uint64_t> inline T alignedRead(const char *Ptr) {
+  static_assert(std::is_pod<T>::value, "Not a pod type.");
+  assert(reinterpret_cast<uintptr_t>(Ptr) % sizeof(T) == 0 && "Unaligned Read");
+  return *reinterpret_cast<const T *>(Ptr);
+}
+
 Summary computeSummary(const char *Start) {
   auto *H = reinterpret_cast<const Header *>(Start);
+  // Check alignment while reading the number of items in each section.
   return Summary{
       H->Version,
       H->TotalSize,
-      *reinterpret_cast<const uint64_t *>(Start + H->SegmentOffset),
-      *reinterpret_cast<const uint64_t *>(Start + H->MIBOffset),
-      *reinterpret_cast<const uint64_t *>(Start + H->StackOffset),
+      alignedRead(Start + H->SegmentOffset),
+      alignedRead(Start + H->MIBOffset),
+      alignedRead(Start + H->StackOffset),
   };
 }
@@ -84,7 +92,9 @@
 bool RawMemProfReader::hasFormat(const MemoryBuffer &Buffer) {
   if (Buffer.getBufferSize() < sizeof(uint64_t))
     return false;
-  uint64_t Magic = *reinterpret_cast<const uint64_t *>(Buffer.getBufferStart());
+  // Aligned read to sanity check that the buffer was allocated with at least 8b
+  // alignment.
+  const uint64_t Magic = alignedRead(Buffer.getBufferStart());
   return Magic == MEMPROF_RAW_MAGIC_64;
 }
diff --git a/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw b/llvm/test/tools/llvm-profdata/Inputs/basic.memprofraw
index 0000000000000000000000000000000000000000..0000000000000000000000000000000000000000
GIT binary patch
literal 0
Hc$@
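Note (illustration only, not part of the patch): a self-contained sketch of the aligned-read contract the reader relies on, modeled on the alignedRead helper added to RawMemProfReader.cpp. The alignedReadSketch name and the buffer layout are assumptions for the example, not LLVM APIs.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <type_traits>

// Modeled on the alignedRead helper above: the pointer must be aligned to
// sizeof(T), which the 8b-rounded section sizes now guarantee for the u64
// counts at the start of each section.
template <class T = uint64_t> inline T alignedReadSketch(const char *Ptr) {
  static_assert(std::is_trivially_copyable<T>::value, "Not a trivially copyable type.");
  assert(reinterpret_cast<uintptr_t>(Ptr) % sizeof(T) == 0 && "Unaligned read");
  return *reinterpret_cast<const T *>(Ptr);
}

int main() {
  alignas(8) char Buf[16] = {};
  const uint64_t Value = 0x1234;
  std::memcpy(Buf + 8, &Value, sizeof(Value));
  // Offsets 0 and 8 are 8b aligned; an offset of 4 would trip the assert.
  assert(alignedReadSketch(Buf) == 0);
  assert(alignedReadSketch(Buf + 8) == 0x1234);
  return 0;
}

Handing the reader a buffer or section offset that is not 8b aligned would trip the assert in debug builds, which is exactly the condition the padded raw profile format is meant to rule out.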