Index: include/llvm/Support/Memory.h =================================================================== --- include/llvm/Support/Memory.h +++ include/llvm/Support/Memory.h @@ -31,14 +31,19 @@ /// Memory block abstraction. class MemoryBlock { public: - MemoryBlock() : Address(nullptr), Size(0) { } - MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { } + MemoryBlock() : Address(nullptr), Size(0), AllocatedSize(0) { } + MemoryBlock(void *addr, size_t size, size_t allocatedSize) : Address(addr), Size(size), AllocatedSize(allocatedSize) { } void *base() const { return Address; } + /// The size in bytes of the memory area that was originally requested for this MemoryBlock, or 0 if the request failed. size_t size() const { return Size; } + /// The size in bytes of the memory area as it was allocated for this MemoryBlock. + /// Always larger or equal to Size. + size_t allocatedSize() const { return AllocatedSize; } private: void *Address; ///< Address of first byte of memory area - size_t Size; ///< Size, in bytes of the memory area + size_t Size; ///< Size, in bytes of the memory area as it was originally requested or 0 if the request failed. + size_t AllocatedSize; ///< Size, in bytes of the memory area as it was allocated, always larger or equal to Size. unsigned Flags = 0; friend class Memory; }; @@ -140,6 +145,7 @@ } void *base() const { return M.base(); } size_t size() const { return M.size(); } + size_t allocatedSize() const { return M.allocatedSize(); } MemoryBlock getMemoryBlock() const { return M; } private: MemoryBlock M; Index: lib/ExecutionEngine/Orc/OrcABISupport.cpp =================================================================== --- lib/ExecutionEngine/Orc/OrcABISupport.cpp +++ lib/ExecutionEngine/Orc/OrcABISupport.cpp @@ -161,10 +161,11 @@ return errorCodeToError(EC); // Create separate MemoryBlocks representing the stubs and pointers. 
- sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize); + sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize, + NumPages * PageSize); sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) + NumPages * PageSize, - NumPages * PageSize); + NumPages * PageSize, NumPages * PageSize); // Populate the stubs page stubs and mark it executable. uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base()); @@ -243,10 +244,11 @@ return errorCodeToError(EC); // Create separate MemoryBlocks representing the stubs and pointers. - sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize); + sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize, + NumPages * PageSize); sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) + NumPages * PageSize, - NumPages * PageSize); + NumPages * PageSize, NumPages * PageSize); // Populate the stubs page stubs and mark it executable. uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base()); @@ -511,10 +513,11 @@ return errorCodeToError(EC); // Create separate MemoryBlocks representing the stubs and pointers. - sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize); + sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize, + NumPages * PageSize); sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) + NumPages * PageSize, - NumPages * PageSize); + NumPages * PageSize, NumPages * PageSize); // Populate the stubs page stubs and mark it executable. uint64_t *Stub = reinterpret_cast<uint64_t *>(StubsBlock.base()); @@ -697,10 +700,11 @@ return errorCodeToError(EC); // Create separate MemoryBlocks representing the stubs and pointers. - sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize); + sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize, + NumPages * PageSize); sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) + NumPages * PageSize, - NumPages * PageSize); + NumPages * PageSize, NumPages * PageSize); // Populate the stubs page stubs and mark it executable. 
uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlock.base()); @@ -943,10 +947,11 @@ return errorCodeToError(EC); // Create separate MemoryBlocks representing the stubs and pointers. - sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize); + sys::MemoryBlock StubsBlock(StubsMem.base(), NumPages * PageSize, + NumPages * PageSize); sys::MemoryBlock PtrsBlock(static_cast<char *>(StubsMem.base()) + NumPages * PageSize, - NumPages * PageSize); + NumPages * PageSize, NumPages * PageSize); // Populate the stubs page stubs and mark it executable. uint32_t *Stub = reinterpret_cast<uint32_t *>(StubsBlock.base()); Index: lib/ExecutionEngine/SectionMemoryManager.cpp =================================================================== --- lib/ExecutionEngine/SectionMemoryManager.cpp +++ lib/ExecutionEngine/SectionMemoryManager.cpp @@ -64,15 +64,15 @@ // Look in the list of free memory regions and use a block there if one // is available. for (FreeMemBlock &FreeMB : MemGroup.FreeMem) { - if (FreeMB.Free.size() >= RequiredSize) { + if (FreeMB.Free.allocatedSize() >= RequiredSize) { Addr = (uintptr_t)FreeMB.Free.base(); - uintptr_t EndOfBlock = Addr + FreeMB.Free.size(); + uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize(); // Align the address. 
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); if (FreeMB.PendingPrefixIndex == (unsigned)-1) { // The part of the block we're giving out to the user is now pending - MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size)); + MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size, Size)); // Remember this pending block, such that future allocations can just // modify it rather than creating a new one @@ -80,13 +80,14 @@ } else { sys::MemoryBlock &PendingMB = MemGroup.PendingMem[FreeMB.PendingPrefixIndex]; - PendingMB = sys::MemoryBlock(PendingMB.base(), - Addr + Size - (uintptr_t)PendingMB.base()); + uintptr_t BlockSize = Addr + Size - (uintptr_t)PendingMB.base(); + PendingMB = sys::MemoryBlock(PendingMB.base(), BlockSize, BlockSize); } // Remember how much free space is now left in this block + std::size_t RemainingSize = EndOfBlock - Addr - Size; FreeMB.Free = - sys::MemoryBlock((void *)(Addr + Size), EndOfBlock - Addr - Size); + sys::MemoryBlock((void *)(Addr + Size), RemainingSize, RemainingSize); return (uint8_t *)Addr; } } @@ -115,20 +116,20 @@ // Remember that we allocated this memory MemGroup.AllocatedMem.push_back(MB); Addr = (uintptr_t)MB.base(); - uintptr_t EndOfBlock = Addr + MB.size(); + uintptr_t EndOfBlock = Addr + MB.allocatedSize(); // Align the address. Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); // The part of the block we're giving out to the user is now pending - MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size)); + MemGroup.PendingMem.push_back(sys::MemoryBlock((void *)Addr, Size, Size)); // The allocateMappedMemory may allocate much more memory than we need. In // this case, we store the unused memory as a free memory block. 
unsigned FreeSize = EndOfBlock - Addr - Size; if (FreeSize > 16) { FreeMemBlock FreeMB; - FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize); + FreeMB.Free = sys::MemoryBlock((void *)(Addr + Size), FreeSize, FreeSize); FreeMB.PendingPrefixIndex = (unsigned)-1; MemGroup.FreeMem.push_back(FreeMB); } @@ -177,16 +178,17 @@ size_t StartOverlap = (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize; - size_t TrimmedSize = M.size(); + size_t TrimmedSize = M.allocatedSize(); TrimmedSize -= StartOverlap; TrimmedSize -= TrimmedSize % PageSize; sys::MemoryBlock Trimmed((void *)((uintptr_t)M.base() + StartOverlap), - TrimmedSize); + TrimmedSize, TrimmedSize); assert(((uintptr_t)Trimmed.base() % PageSize) == 0); assert((Trimmed.size() % PageSize) == 0); - assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size()); + assert((Trimmed.allocatedSize() % PageSize) == 0); + assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size() && Trimmed.allocatedSize() <= M.allocatedSize()); return Trimmed; } @@ -211,7 +213,7 @@ // Remove all blocks which are now empty MemGroup.FreeMem.erase( remove_if(MemGroup.FreeMem, - [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }), + [](FreeMemBlock &FreeMB) { return FreeMB.Free.allocatedSize() == 0; }), MemGroup.FreeMem.end()); return std::error_code(); @@ -219,7 +221,7 @@ void SectionMemoryManager::invalidateInstructionCache() { for (sys::MemoryBlock &Block : CodeMem.PendingMem) - sys::Memory::InvalidateInstructionCache(Block.base(), Block.size()); + sys::Memory::InvalidateInstructionCache(Block.base(), Block.allocatedSize()); } SectionMemoryManager::~SectionMemoryManager() { @@ -242,11 +244,7 @@ allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose, size_t NumBytes, const sys::MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC) override { - // allocateMappedMemory calls mmap(2). We round up a request size - // to page size to get extra space for free. 
- static const size_t PageSize = sys::Process::getPageSize(); - size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1); - return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC); + return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC); } std::error_code protectMappedMemory(const sys::MemoryBlock &Block, Index: lib/Support/Memory.cpp =================================================================== --- lib/Support/Memory.cpp +++ lib/Support/Memory.cpp @@ -43,7 +43,7 @@ raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB) { return OS << "[ " << MB.base() << " .. " - << (void *)((char *)MB.base() + MB.size()) << " ] (" << MB.size() + << (void *)((char *)MB.base() + MB.allocatedSize()) << " ] (" << MB.allocatedSize() << " bytes)"; } Index: lib/Support/Unix/Memory.inc =================================================================== --- lib/Support/Unix/Memory.inc +++ lib/Support/Unix/Memory.inc @@ -102,13 +102,14 @@ // Use any near hint and the page size to set a page-aligned starting address uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) + - NearBlock->size() : 0; + NearBlock->allocatedSize() : 0; static const size_t PageSize = Process::getPageSize(); + const size_t NumPages = (NumBytes+PageSize-1)/PageSize; if (Start && Start % PageSize) Start += PageSize - Start % PageSize; // FIXME: Handle huge page requests (MF_HUGE_HINT). - void *Addr = ::mmap(reinterpret_cast<void *>(Start), NumBytes, Protect, + void *Addr = ::mmap(reinterpret_cast<void *>(Start), PageSize*NumPages, Protect, MMFlags, fd, 0); if (Addr == MAP_FAILED) { if (NearBlock) //Try again without a near hint @@ -121,6 +122,7 @@ MemoryBlock Result; Result.Address = Addr; Result.Size = NumBytes; + Result.AllocatedSize = PageSize*NumPages; Result.Flags = PFlags; // Rely on protectMappedMemory to invalidate instruction cache. 
@@ -150,7 +152,7 @@ std::error_code Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { static const size_t PageSize = Process::getPageSize(); - if (M.Address == nullptr || M.Size == 0) + if (M.Address == nullptr || M.Size == 0 || M.AllocatedSize == 0) return std::error_code(); if (!Flags) @@ -158,7 +160,7 @@ int Protect = getPosixProtectionFlags(Flags); uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize); - uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize); + uintptr_t End = alignAddr((uint8_t *)M.Address + M.AllocatedSize, PageSize); bool InvalidateCache = (Flags & MF_EXEC); @@ -171,7 +173,7 @@ if (Result != 0) return std::error_code(errno, std::generic_category()); - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); InvalidateCache = false; } #endif @@ -182,7 +184,7 @@ return std::error_code(errno, std::generic_category()); if (InvalidateCache) - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); return std::error_code(); } Index: lib/Support/Windows/Memory.inc =================================================================== --- lib/Support/Windows/Memory.inc +++ lib/Support/Windows/Memory.inc @@ -125,7 +125,7 @@ size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity; uintptr_t Start = NearBlock ? reinterpret_cast<uintptr_t>(NearBlock->base()) + - NearBlock->size() + NearBlock->allocatedSize() : 0; // If the requested address is not aligned to the allocation granularity, @@ -150,6 +150,7 @@ MemoryBlock Result; Result.Address = PA; Result.Size = NumBytes; + Result.AllocatedSize = AllocSize; Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? 
MF_HUGE_HINT : 0); if (Flags & MF_EXEC) @@ -159,7 +160,7 @@ } std::error_code Memory::releaseMappedMemory(MemoryBlock &M) { - if (M.Address == 0 || M.Size == 0) + if (M.Address == 0 || M.Size == 0 || M.AllocatedSize == 0) return std::error_code(); if (!VirtualFree(M.Address, 0, MEM_RELEASE)) @@ -167,23 +168,24 @@ M.Address = 0; M.Size = 0; + M.AllocatedSize = 0; return std::error_code(); } std::error_code Memory::protectMappedMemory(const MemoryBlock &M, - unsigned Flags) { - if (M.Address == 0 || M.Size == 0) + unsigned Flags) { + if (M.Address == 0 || M.Size == 0 || M.AllocatedSize == 0) return std::error_code(); DWORD Protect = getWindowsProtectionFlags(Flags); DWORD OldFlags; - if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags)) + if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags)) return mapWindowsError(::GetLastError()); if (Flags & MF_EXEC) - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); return std::error_code(); } Index: tools/llvm-rtdyld/llvm-rtdyld.cpp =================================================================== --- tools/llvm-rtdyld/llvm-rtdyld.cpp +++ tools/llvm-rtdyld/llvm-rtdyld.cpp @@ -249,7 +249,7 @@ report_fatal_error("Can't allocate enough memory. 
Tune --preallocate"); uintptr_t OldSlabOffset = CurrentSlabOffset; - sys::MemoryBlock MB((void *)OldSlabOffset, Size); + sys::MemoryBlock MB((void *)OldSlabOffset, Size, Size); if (isCode) FunctionMemory.push_back(SectionInfo(SectionName, MB, SectionID)); else Index: unittests/Support/MemoryTest.cpp =================================================================== --- unittests/Support/MemoryTest.cpp +++ unittests/Support/MemoryTest.cpp @@ -76,9 +76,9 @@ return true; if (M1.base() > M2.base()) - return (unsigned char *)M2.base() + M2.size() > M1.base(); + return (unsigned char *)M2.base() + M2.allocatedSize() > M1.base(); - return (unsigned char *)M1.base() + M1.size() > M2.base(); + return (unsigned char *)M1.base() + M1.allocatedSize() > M2.base(); } unsigned Flags; @@ -132,11 +132,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); + EXPECT_EQ(16U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); + EXPECT_EQ(64U, M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_EQ(32U, M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -147,7 +150,8 @@ MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(16U, M4.size()); + EXPECT_EQ(16U, M4.size()); + EXPECT_GE(M4.allocatedSize(), M4.size()); EXPECT_FALSE(Memory::releaseMappedMemory(M4)); EXPECT_FALSE(Memory::releaseMappedMemory(M2)); } @@ -196,11 +200,14 @@ EXPECT_FALSE(doesOverlap(M1, M3)); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(1U * sizeof(int), M1.size()); + EXPECT_EQ(1U * sizeof(int), M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(8U * sizeof(int), M2.size()); + 
EXPECT_EQ(8U * sizeof(int), M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(4U * sizeof(int), M3.size()); + EXPECT_EQ(4U * sizeof(int), M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); int *x = (int*)M1.base(); *x = 1; @@ -224,7 +231,8 @@ Flags, EC); EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(64U * sizeof(int), M4.size()); + EXPECT_EQ(64U * sizeof(int), M4.size()); + EXPECT_GE(M4.allocatedSize(), M4.size()); x = (int*)M4.base(); *x = 4; EXPECT_EQ(4, *x); @@ -255,11 +263,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(2U * sizeof(int), M1.size()); + EXPECT_EQ(2U * sizeof(int), M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(8U * sizeof(int), M2.size()); + EXPECT_EQ(8U * sizeof(int), M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(4U * sizeof(int), M3.size()); + EXPECT_EQ(4U * sizeof(int), M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags))); EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags))); @@ -289,7 +300,8 @@ MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(16U, M4.size()); + EXPECT_EQ(16U, M4.size()); + EXPECT_GE(M4.allocatedSize(), M4.size()); EXPECT_EQ(std::error_code(), Memory::protectMappedMemory(M4, getTestableEquivalent(Flags))); x = (int*)M4.base(); @@ -310,11 +322,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); + EXPECT_EQ(16U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); + EXPECT_EQ(64U, M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); 
EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_EQ(32U, M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -328,7 +343,7 @@ TEST_P(MappedMemoryTest, DuplicateNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near((void*)(3*PageSize), 16); + MemoryBlock Near((void*)(3*PageSize), 16, 16); MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); @@ -337,11 +352,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); + EXPECT_EQ(16U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); + EXPECT_EQ(64U, M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_EQ(32U, M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); EXPECT_FALSE(Memory::releaseMappedMemory(M3)); @@ -351,7 +369,7 @@ TEST_P(MappedMemoryTest, ZeroNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near(nullptr, 0); + MemoryBlock Near(nullptr, 0, 0); MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); @@ -360,11 +378,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); + EXPECT_EQ(16U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); + EXPECT_EQ(64U, M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_EQ(32U, M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(doesOverlap(M1, M2)); 
EXPECT_FALSE(doesOverlap(M2, M3)); @@ -378,7 +399,7 @@ TEST_P(MappedMemoryTest, ZeroSizeNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near((void*)(4*PageSize), 0); + MemoryBlock Near((void*)(4*PageSize), 0, 0); MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); @@ -387,11 +408,14 @@ EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); + EXPECT_EQ(16U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); + EXPECT_EQ(64U, M2.size()); + EXPECT_GE(M2.allocatedSize(), M2.size()); EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_EQ(32U, M3.size()); + EXPECT_GE(M3.allocatedSize(), M3.size()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -405,12 +429,13 @@ TEST_P(MappedMemoryTest, UnalignedNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near((void*)(2*PageSize+5), 0); + MemoryBlock Near((void*)(2*PageSize+5), 0, 0); MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(sizeof(int), M1.size()); + EXPECT_EQ(15U, M1.size()); + EXPECT_GE(M1.allocatedSize(), M1.size()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); }