Index: include/llvm/Support/Memory.h =================================================================== --- include/llvm/Support/Memory.h +++ include/llvm/Support/Memory.h @@ -24,135 +24,138 @@ namespace sys { - /// This class encapsulates the notion of a memory block which has an address - /// and a size. It is used by the Memory class (a friend) as the result of - /// various memory allocation operations. - /// @see Memory - /// Memory block abstraction. - class MemoryBlock { - public: - MemoryBlock() : Address(nullptr), Size(0) { } - MemoryBlock(void *addr, size_t size) : Address(addr), Size(size) { } - void *base() const { return Address; } - size_t size() const { return Size; } +/// This class encapsulates the notion of a memory block which has an address +/// and a size. It is used by the Memory class (a friend) as the result of +/// various memory allocation operations. +/// @see Memory +/// Memory block abstraction. +class MemoryBlock { +public: + MemoryBlock() : Address(nullptr), AllocatedSize(0) {} + MemoryBlock(void *addr, size_t allocatedSize) + : Address(addr), AllocatedSize(allocatedSize) {} + void *base() const { return Address; } + /// The size as it was allocated. This is always greater or equal to the + /// size that was originally requested. + size_t allocatedSize() const { return AllocatedSize; } - private: - void *Address; ///< Address of first byte of memory area - size_t Size; ///< Size, in bytes of the memory area - unsigned Flags = 0; - friend class Memory; +private: + void *Address; ///< Address of first byte of memory area + size_t AllocatedSize; ///< Size, in bytes of the memory area + unsigned Flags = 0; + friend class Memory; +}; + +/// This class provides various memory handling functions that manipulate +/// MemoryBlock instances. +/// @since 1.4 +/// An abstraction for memory operations. +class Memory { +public: + enum ProtectionFlags { + MF_READ = 0x1000000, + MF_WRITE = 0x2000000, + MF_EXEC = 0x4000000, + MF_RWE_MASK = 0x7000000, + MF_HUGE_HINT = 0x0000001 }; - /// This class provides various memory handling functions that manipulate - /// MemoryBlock instances. - /// @since 1.4 - /// An abstraction for memory operations. - class Memory { - public: - enum ProtectionFlags { - MF_READ = 0x1000000, - MF_WRITE = 0x2000000, - MF_EXEC = 0x4000000, - MF_RWE_MASK = 0x7000000, - MF_HUGE_HINT = 0x0000001 - }; + /// This method allocates a block of memory that is suitable for loading + /// dynamically generated code (e.g. JIT). An attempt to allocate + /// \p NumBytes bytes of virtual memory is made. + /// \p NearBlock may point to an existing allocation in which case + /// an attempt is made to allocate more memory near the existing block. + /// The actual allocated address is not guaranteed to be near the requested + /// address. + /// \p Flags is used to set the initial protection flags for the block + /// of the memory. + /// \p EC [out] returns an object describing any error that occurs. + /// + /// This method may allocate more than the number of bytes requested. The + /// actual number of bytes allocated is indicated in the returned + /// MemoryBlock. + /// + /// The start of the allocated block must be aligned with the + /// system allocation granularity (64K on Windows, page size on Linux). + /// If the address following \p NearBlock is not so aligned, it will be + /// rounded up to the next allocation granularity boundary. 
+ /// + /// \r a non-null MemoryBlock if the function was successful, + /// otherwise a null MemoryBlock with \p EC describing the error. + /// + /// Allocate mapped memory. + static MemoryBlock allocateMappedMemory(size_t NumBytes, + const MemoryBlock *const NearBlock, + unsigned Flags, std::error_code &EC); - /// This method allocates a block of memory that is suitable for loading - /// dynamically generated code (e.g. JIT). An attempt to allocate - /// \p NumBytes bytes of virtual memory is made. - /// \p NearBlock may point to an existing allocation in which case - /// an attempt is made to allocate more memory near the existing block. - /// The actual allocated address is not guaranteed to be near the requested - /// address. - /// \p Flags is used to set the initial protection flags for the block - /// of the memory. - /// \p EC [out] returns an object describing any error that occurs. - /// - /// This method may allocate more than the number of bytes requested. The - /// actual number of bytes allocated is indicated in the returned - /// MemoryBlock. - /// - /// The start of the allocated block must be aligned with the - /// system allocation granularity (64K on Windows, page size on Linux). - /// If the address following \p NearBlock is not so aligned, it will be - /// rounded up to the next allocation granularity boundary. - /// - /// \r a non-null MemoryBlock if the function was successful, - /// otherwise a null MemoryBlock is with \p EC describing the error. - /// - /// Allocate mapped memory. - static MemoryBlock allocateMappedMemory(size_t NumBytes, - const MemoryBlock *const NearBlock, - unsigned Flags, - std::error_code &EC); + /// This method releases a block of memory that was allocated with the + /// allocateMappedMemory method. It should not be used to release any + /// memory block allocated any other way. + /// \p Block describes the memory to be released. + /// + /// \r error_success if the function was successful, or an error_code + /// describing the failure if an error occurred. + /// + /// Release mapped memory. + static std::error_code releaseMappedMemory(MemoryBlock &Block); - /// This method releases a block of memory that was allocated with the - /// allocateMappedMemory method. It should not be used to release any - /// memory block allocated any other way. - /// \p Block describes the memory to be released. - /// - /// \r error_success if the function was successful, or an error_code - /// describing the failure if an error occurred. - /// - /// Release mapped memory. - static std::error_code releaseMappedMemory(MemoryBlock &Block); + /// This method sets the protection flags for a block of memory to the + /// state specified by \p Flags. The behavior is not specified if the + /// memory was not allocated using the allocateMappedMemory method. + /// \p Block describes the memory block to be protected. + /// \p Flags specifies the new protection state to be assigned to the block. + /// \p ErrMsg [out] returns a string describing any error that occurred. + /// + /// If \p Flags is MF_WRITE, the actual behavior varies + /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the + /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386). + /// + /// \r error_success if the function was successful, or an error_code + /// describing the failure if an error occurred. + /// + /// Set memory protection state.
+ static std::error_code protectMappedMemory(const MemoryBlock &Block, + unsigned Flags); - /// This method sets the protection flags for a block of memory to the - /// state specified by /p Flags. The behavior is not specified if the - /// memory was not allocated using the allocateMappedMemory method. - /// \p Block describes the memory block to be protected. - /// \p Flags specifies the new protection state to be assigned to the block. - /// \p ErrMsg [out] returns a string describing any error that occurred. - /// - /// If \p Flags is MF_WRITE, the actual behavior varies - /// with the operating system (i.e. MF_READ | MF_WRITE on Windows) and the - /// target architecture (i.e. MF_WRITE -> MF_READ | MF_WRITE on i386). - /// - /// \r error_success if the function was successful, or an error_code - /// describing the failure if an error occurred. - /// - /// Set memory protection state. - static std::error_code protectMappedMemory(const MemoryBlock &Block, - unsigned Flags); + /// InvalidateInstructionCache - Before the JIT can run a block of code + /// that has been emitted it must invalidate the instruction cache on some + /// platforms. + static void InvalidateInstructionCache(const void *Addr, size_t Len); +}; - /// InvalidateInstructionCache - Before the JIT can run a block of code - /// that has been emitted it must invalidate the instruction cache on some - /// platforms. - static void InvalidateInstructionCache(const void *Addr, size_t Len); - }; +/// Owning version of MemoryBlock. +class OwningMemoryBlock { +public: + OwningMemoryBlock() = default; + explicit OwningMemoryBlock(MemoryBlock M) : M(M) {} + OwningMemoryBlock(OwningMemoryBlock &&Other) { + M = Other.M; + Other.M = MemoryBlock(); + } + OwningMemoryBlock &operator=(OwningMemoryBlock &&Other) { + M = Other.M; + Other.M = MemoryBlock(); + return *this; + } + ~OwningMemoryBlock() { Memory::releaseMappedMemory(M); } + void *base() const { return M.base(); } + /// The size as it was allocated. This is always greater or equal to the + /// size that was originally requested. + size_t allocatedSize() const { return M.allocatedSize(); } + MemoryBlock getMemoryBlock() const { return M; } - /// Owning version of MemoryBlock. - class OwningMemoryBlock { - public: - OwningMemoryBlock() = default; - explicit OwningMemoryBlock(MemoryBlock M) : M(M) {} - OwningMemoryBlock(OwningMemoryBlock &&Other) { - M = Other.M; - Other.M = MemoryBlock(); - } - OwningMemoryBlock& operator=(OwningMemoryBlock &&Other) { - M = Other.M; - Other.M = MemoryBlock(); - return *this; - } - ~OwningMemoryBlock() { - Memory::releaseMappedMemory(M); - } - void *base() const { return M.base(); } - size_t size() const { return M.size(); } - MemoryBlock getMemoryBlock() const { return M; } - private: - MemoryBlock M; - }; +private: + MemoryBlock M; +}; #ifndef NDEBUG - /// Debugging output for Memory::ProtectionFlags. - raw_ostream &operator<<(raw_ostream &OS, const Memory::ProtectionFlags &PF); +/// Debugging output for Memory::ProtectionFlags. +raw_ostream &operator<<(raw_ostream &OS, const Memory::ProtectionFlags &PF); - /// Debugging output for MemoryBlock. - raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB); +/// Debugging output for MemoryBlock. 
+raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB); #endif // ifndef NDEBUG - } // end namespace sys - } // end namespace llvm +} // end namespace sys +} // end namespace llvm #endif Index: lib/ExecutionEngine/JITLink/JITLink.cpp =================================================================== --- lib/ExecutionEngine/JITLink/JITLink.cpp +++ lib/ExecutionEngine/JITLink/JITLink.cpp @@ -154,7 +154,7 @@ MutableArrayRef getWorkingMemory(ProtectionFlags Seg) override { assert(SegBlocks.count(Seg) && "No allocation for segment"); return {static_cast(SegBlocks[Seg].base()), - SegBlocks[Seg].size()}; + SegBlocks[Seg].allocatedSize()}; } JITTargetAddress getTargetMemory(ProtectionFlags Seg) override { assert(SegBlocks.count(Seg) && "No allocation for segment"); @@ -178,7 +178,8 @@ if (auto EC = sys::Memory::protectMappedMemory(Block, Prot)) return errorCodeToError(EC); if (Prot & sys::Memory::MF_EXEC) - sys::Memory::InvalidateInstructionCache(Block.base(), Block.size()); + sys::Memory::InvalidateInstructionCache(Block.base(), + Block.allocatedSize()); } return Error::success(); } Index: lib/ExecutionEngine/SectionMemoryManager.cpp =================================================================== --- lib/ExecutionEngine/SectionMemoryManager.cpp +++ lib/ExecutionEngine/SectionMemoryManager.cpp @@ -64,9 +64,9 @@ // Look in the list of free memory regions and use a block there if one // is available. for (FreeMemBlock &FreeMB : MemGroup.FreeMem) { - if (FreeMB.Free.size() >= RequiredSize) { + if (FreeMB.Free.allocatedSize() >= RequiredSize) { Addr = (uintptr_t)FreeMB.Free.base(); - uintptr_t EndOfBlock = Addr + FreeMB.Free.size(); + uintptr_t EndOfBlock = Addr + FreeMB.Free.allocatedSize(); // Align the address. Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); @@ -115,7 +115,7 @@ // Remember that we allocated this memory MemGroup.AllocatedMem.push_back(MB); Addr = (uintptr_t)MB.base(); - uintptr_t EndOfBlock = Addr + MB.size(); + uintptr_t EndOfBlock = Addr + MB.allocatedSize(); // Align the address. 
Addr = (Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1); @@ -177,7 +177,7 @@ size_t StartOverlap = (PageSize - ((uintptr_t)M.base() % PageSize)) % PageSize; - size_t TrimmedSize = M.size(); + size_t TrimmedSize = M.allocatedSize(); TrimmedSize -= StartOverlap; TrimmedSize -= TrimmedSize % PageSize; @@ -185,8 +185,9 @@ TrimmedSize); assert(((uintptr_t)Trimmed.base() % PageSize) == 0); - assert((Trimmed.size() % PageSize) == 0); - assert(M.base() <= Trimmed.base() && Trimmed.size() <= M.size()); + assert((Trimmed.allocatedSize() % PageSize) == 0); + assert(M.base() <= Trimmed.base() && + Trimmed.allocatedSize() <= M.allocatedSize()); return Trimmed; } @@ -209,17 +210,19 @@ } // Remove all blocks which are now empty - MemGroup.FreeMem.erase( - remove_if(MemGroup.FreeMem, - [](FreeMemBlock &FreeMB) { return FreeMB.Free.size() == 0; }), - MemGroup.FreeMem.end()); + MemGroup.FreeMem.erase(remove_if(MemGroup.FreeMem, + [](FreeMemBlock &FreeMB) { + return FreeMB.Free.allocatedSize() == 0; + }), + MemGroup.FreeMem.end()); return std::error_code(); } void SectionMemoryManager::invalidateInstructionCache() { for (sys::MemoryBlock &Block : CodeMem.PendingMem) - sys::Memory::InvalidateInstructionCache(Block.base(), Block.size()); + sys::Memory::InvalidateInstructionCache(Block.base(), + Block.allocatedSize()); } SectionMemoryManager::~SectionMemoryManager() { @@ -242,11 +245,7 @@ allocateMappedMemory(SectionMemoryManager::AllocationPurpose Purpose, size_t NumBytes, const sys::MemoryBlock *const NearBlock, unsigned Flags, std::error_code &EC) override { - // allocateMappedMemory calls mmap(2). We round up a request size - // to page size to get extra space for free. - static const size_t PageSize = sys::Process::getPageSizeEstimate(); - size_t ReqBytes = (NumBytes + PageSize - 1) & ~(PageSize - 1); - return sys::Memory::allocateMappedMemory(ReqBytes, NearBlock, Flags, EC); + return sys::Memory::allocateMappedMemory(NumBytes, NearBlock, Flags, EC); } std::error_code protectMappedMemory(const sys::MemoryBlock &Block, Index: lib/Support/FileOutputBuffer.cpp =================================================================== --- lib/Support/FileOutputBuffer.cpp +++ lib/Support/FileOutputBuffer.cpp @@ -75,20 +75,22 @@ // output file on commit(). This is used only when we cannot use OnDiskBuffer. 
class InMemoryBuffer : public FileOutputBuffer { public: - InMemoryBuffer(StringRef Path, MemoryBlock Buf, unsigned Mode) - : FileOutputBuffer(Path), Buffer(Buf), Mode(Mode) {} + InMemoryBuffer(StringRef Path, MemoryBlock Buf, std::size_t BufSize, + unsigned Mode) + : FileOutputBuffer(Path), Buffer(Buf), BufferSize(BufSize), + Mode(Mode) {} uint8_t *getBufferStart() const override { return (uint8_t *)Buffer.base(); } uint8_t *getBufferEnd() const override { - return (uint8_t *)Buffer.base() + Buffer.size(); + return (uint8_t *)Buffer.base() + BufferSize; } - size_t getBufferSize() const override { return Buffer.size(); } + size_t getBufferSize() const override { return BufferSize; } Error commit() override { if (FinalPath == "-") { - llvm::outs() << StringRef((const char *)Buffer.base(), Buffer.size()); + llvm::outs() << StringRef((const char *)Buffer.base(), BufferSize); llvm::outs().flush(); return Error::success(); } @@ -100,12 +102,14 @@ openFileForWrite(FinalPath, FD, CD_CreateAlways, OF_None, Mode)) return errorCodeToError(EC); raw_fd_ostream OS(FD, /*shouldClose=*/true, /*unbuffered=*/true); - OS << StringRef((const char *)Buffer.base(), Buffer.size()); + OS << StringRef((const char *)Buffer.base(), BufferSize); return Error::success(); } private: + // Buffer may actually contain a larger memory block than BufferSize OwningMemoryBlock Buffer; + size_t BufferSize; unsigned Mode; }; } // namespace @@ -117,7 +121,7 @@ Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC); if (EC) return errorCodeToError(EC); - return llvm::make_unique(Path, MB, Mode); + return llvm::make_unique(Path, MB, Size, Mode); } static Expected> Index: lib/Support/Memory.cpp =================================================================== --- lib/Support/Memory.cpp +++ lib/Support/Memory.cpp @@ -43,8 +43,8 @@ raw_ostream &operator<<(raw_ostream &OS, const MemoryBlock &MB) { return OS << "[ " << MB.base() << " .. " - << (void *)((char *)MB.base() + MB.size()) << " ] (" << MB.size() - << " bytes)"; + << (void *)((char *)MB.base() + MB.allocatedSize()) << " ] (" + << MB.allocatedSize() << " bytes)"; } } // end namespace sys Index: lib/Support/Unix/Memory.inc =================================================================== --- lib/Support/Unix/Memory.inc +++ lib/Support/Unix/Memory.inc @@ -102,13 +102,15 @@ // Use any near hint and the page size to set a page-aligned starting address uintptr_t Start = NearBlock ? reinterpret_cast(NearBlock->base()) + - NearBlock->size() : 0; + NearBlock->allocatedSize() : 0; static const size_t PageSize = Process::getPageSizeEstimate(); + const size_t NumPages = (NumBytes+PageSize-1)/PageSize; + if (Start && Start % PageSize) Start += PageSize - Start % PageSize; // FIXME: Handle huge page requests (MF_HUGE_HINT). - void *Addr = ::mmap(reinterpret_cast(Start), NumBytes, Protect, + void *Addr = ::mmap(reinterpret_cast(Start), PageSize*NumPages, Protect, MMFlags, fd, 0); if (Addr == MAP_FAILED) { if (NearBlock) //Try again without a near hint @@ -120,7 +122,7 @@ MemoryBlock Result; Result.Address = Addr; - Result.Size = NumBytes; + Result.AllocatedSize = PageSize*NumPages; Result.Flags = PFlags; // Rely on protectMappedMemory to invalidate instruction cache. 
@@ -135,14 +137,14 @@ std::error_code Memory::releaseMappedMemory(MemoryBlock &M) { - if (M.Address == nullptr || M.Size == 0) + if (M.Address == nullptr || M.AllocatedSize == 0) return std::error_code(); - if (0 != ::munmap(M.Address, M.Size)) + if (0 != ::munmap(M.Address, M.AllocatedSize)) return std::error_code(errno, std::generic_category()); M.Address = nullptr; - M.Size = 0; + M.AllocatedSize = 0; return std::error_code(); } @@ -150,7 +152,7 @@ std::error_code Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { static const size_t PageSize = Process::getPageSizeEstimate(); - if (M.Address == nullptr || M.Size == 0) + if (M.Address == nullptr || M.AllocatedSize == 0) return std::error_code(); if (!Flags) @@ -158,7 +160,7 @@ int Protect = getPosixProtectionFlags(Flags); uintptr_t Start = alignAddr((uint8_t *)M.Address - PageSize + 1, PageSize); - uintptr_t End = alignAddr((uint8_t *)M.Address + M.Size, PageSize); + uintptr_t End = alignAddr((uint8_t *)M.Address + M.AllocatedSize, PageSize); bool InvalidateCache = (Flags & MF_EXEC); @@ -171,7 +173,7 @@ if (Result != 0) return std::error_code(errno, std::generic_category()); - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); InvalidateCache = false; } #endif @@ -182,7 +184,7 @@ return std::error_code(errno, std::generic_category()); if (InvalidateCache) - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); return std::error_code(); } Index: lib/Support/Windows/Memory.inc =================================================================== --- lib/Support/Windows/Memory.inc +++ lib/Support/Windows/Memory.inc @@ -125,7 +125,7 @@ size_t NumBlocks = (NumBytes + Granularity - 1) / Granularity; uintptr_t Start = NearBlock ? reinterpret_cast(NearBlock->base()) + - NearBlock->size() + NearBlock->allocatedSize() : 0; // If the requested address is not aligned to the allocation granularity, @@ -149,7 +149,7 @@ MemoryBlock Result; Result.Address = PA; - Result.Size = NumBytes; + Result.AllocatedSize = AllocSize; Result.Flags = (Flags & ~MF_HUGE_HINT) | (HugePages ? 
MF_HUGE_HINT : 0); if (Flags & MF_EXEC) @@ -159,31 +159,31 @@ } std::error_code Memory::releaseMappedMemory(MemoryBlock &M) { - if (M.Address == 0 || M.Size == 0) + if (M.Address == 0 || M.AllocatedSize == 0) return std::error_code(); if (!VirtualFree(M.Address, 0, MEM_RELEASE)) return mapWindowsError(::GetLastError()); M.Address = 0; - M.Size = 0; + M.AllocatedSize = 0; return std::error_code(); } std::error_code Memory::protectMappedMemory(const MemoryBlock &M, unsigned Flags) { - if (M.Address == 0 || M.Size == 0) + if (M.Address == 0 || M.AllocatedSize == 0) return std::error_code(); DWORD Protect = getWindowsProtectionFlags(Flags); DWORD OldFlags; - if (!VirtualProtect(M.Address, M.Size, Protect, &OldFlags)) + if (!VirtualProtect(M.Address, M.AllocatedSize, Protect, &OldFlags)) return mapWindowsError(::GetLastError()); if (Flags & MF_EXEC) - Memory::InvalidateInstructionCache(M.Address, M.Size); + Memory::InvalidateInstructionCache(M.Address, M.AllocatedSize); return std::error_code(); } Index: tools/llvm-rtdyld/llvm-rtdyld.cpp =================================================================== --- tools/llvm-rtdyld/llvm-rtdyld.cpp +++ tools/llvm-rtdyld/llvm-rtdyld.cpp @@ -27,9 +27,9 @@ #include "llvm/Support/CommandLine.h" #include "llvm/Support/DynamicLibrary.h" #include "llvm/Support/InitLLVM.h" +#include "llvm/Support/MSVCErrorWorkarounds.h" #include "llvm/Support/Memory.h" #include "llvm/Support/MemoryBuffer.h" -#include "llvm/Support/MSVCErrorWorkarounds.h" #include "llvm/Support/Path.h" #include "llvm/Support/TargetRegistry.h" #include "llvm/Support/TargetSelect.h" @@ -41,9 +41,8 @@ using namespace llvm; using namespace llvm::object; -static cl::list -InputFileList(cl::Positional, cl::ZeroOrMore, - cl::desc("")); +static cl::list InputFileList(cl::Positional, cl::ZeroOrMore, + cl::desc("")); enum ActionType { AC_Execute, @@ -53,47 +52,44 @@ AC_Verify }; -static cl::opt -Action(cl::desc("Action to perform:"), - cl::init(AC_Execute), - cl::values(clEnumValN(AC_Execute, "execute", - "Load, link, and execute the inputs."), - clEnumValN(AC_PrintLineInfo, "printline", - "Load, link, and print line information for each function."), - clEnumValN(AC_PrintDebugLineInfo, "printdebugline", - "Load, link, and print line information for each function using the debug object"), - clEnumValN(AC_PrintObjectLineInfo, "printobjline", - "Like -printlineinfo but does not load the object first"), - clEnumValN(AC_Verify, "verify", - "Load, link and verify the resulting memory image."))); +static cl::opt Action( + cl::desc("Action to perform:"), cl::init(AC_Execute), + cl::values( + clEnumValN(AC_Execute, "execute", + "Load, link, and execute the inputs."), + clEnumValN(AC_PrintLineInfo, "printline", + "Load, link, and print line information for each function."), + clEnumValN(AC_PrintDebugLineInfo, "printdebugline", + "Load, link, and print line information for each function " + "using the debug object"), + clEnumValN(AC_PrintObjectLineInfo, "printobjline", + "Like -printlineinfo but does not load the object first"), + clEnumValN(AC_Verify, "verify", + "Load, link and verify the resulting memory image."))); static cl::opt -EntryPoint("entry", - cl::desc("Function to call as entry point."), - cl::init("_main")); + EntryPoint("entry", cl::desc("Function to call as entry point."), + cl::init("_main")); -static cl::list -Dylibs("dylib", - cl::desc("Add library."), - cl::ZeroOrMore); +static cl::list Dylibs("dylib", cl::desc("Add library."), + cl::ZeroOrMore); static cl::list InputArgv("args", 
cl::Positional, cl::desc("..."), cl::ZeroOrMore, cl::PositionalEatsArgs); static cl::opt -TripleName("triple", cl::desc("Target triple for disassembler")); + TripleName("triple", cl::desc("Target triple for disassembler")); static cl::opt -MCPU("mcpu", - cl::desc("Target a specific cpu type (-mcpu=help for details)"), - cl::value_desc("cpu-name"), - cl::init("")); + MCPU("mcpu", + cl::desc("Target a specific cpu type (-mcpu=help for details)"), + cl::value_desc("cpu-name"), cl::init("")); static cl::list -CheckFiles("check", - cl::desc("File containing RuntimeDyld verifier checks."), - cl::ZeroOrMore); + CheckFiles("check", + cl::desc("File containing RuntimeDyld verifier checks."), + cl::ZeroOrMore); static cl::opt PreallocMemory("preallocate", @@ -119,24 +115,22 @@ cl::init(0), cl::Hidden); static cl::list -SpecificSectionMappings("map-section", - cl::desc("For -verify only: Map a section to a " - "specific address."), - cl::ZeroOrMore, - cl::Hidden); + SpecificSectionMappings("map-section", + cl::desc("For -verify only: Map a section to a " + "specific address."), + cl::ZeroOrMore, cl::Hidden); -static cl::list -DummySymbolMappings("dummy-extern", - cl::desc("For -verify only: Inject a symbol into the extern " - "symbol table."), - cl::ZeroOrMore, - cl::Hidden); +static cl::list DummySymbolMappings( + "dummy-extern", + cl::desc("For -verify only: Inject a symbol into the extern " + "symbol table."), + cl::ZeroOrMore, cl::Hidden); -static cl::opt -PrintAllocationRequests("print-alloc-requests", - cl::desc("Print allocation requests made to the memory " - "manager by RuntimeDyld"), - cl::Hidden); +static cl::opt PrintAllocationRequests( + "print-alloc-requests", + cl::desc("Print allocation requests made to the memory " + "manager by RuntimeDyld"), + cl::Hidden); ExitOnError ExitOnErr; @@ -163,7 +157,7 @@ auto J = SectionIDs.find(SectionName); if (J == SectionIDs.end()) return make_error("No section named \"" + SectionName + - "\" in file " + FileName, + "\" in file " + FileName, inconvertibleErrorCode()); return J->second; } @@ -174,7 +168,7 @@ public: struct SectionInfo { SectionInfo(StringRef Name, sys::MemoryBlock MB, unsigned SectionID) - : Name(Name), MB(std::move(MB)), SectionID(SectionID) {} + : Name(Name), MB(std::move(MB)), SectionID(SectionID) {} std::string Name; sys::MemoryBlock MB; unsigned SectionID = ~0U; @@ -191,9 +185,7 @@ bool IsReadOnly) override; /// If non null, records subsequent Name -> SectionID mappings. 
- void setSectionIDsMap(SectionIDMap *SecIDMap) { - this->SecIDMap = SecIDMap; - } + void setSectionIDsMap(SectionIDMap *SecIDMap) { this->SecIDMap = SecIDMap; } void *getPointerToNamedFunction(const std::string &Name, bool AbortOnFailure = true) override { @@ -229,11 +221,8 @@ void preallocateSlab(uint64_t Size) { std::error_code EC; - sys::MemoryBlock MB = - sys::Memory::allocateMappedMemory(Size, nullptr, - sys::Memory::MF_READ | - sys::Memory::MF_WRITE, - EC); + sys::MemoryBlock MB = sys::Memory::allocateMappedMemory( + Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC); if (!MB.base()) report_fatal_error("Can't allocate enough memory: " + EC.message()); @@ -255,7 +244,7 @@ else DataMemory.push_back(SectionInfo(SectionName, MB, SectionID)); CurrentSlabOffset += Size; - return (uint8_t*)OldSlabOffset; + return (uint8_t *)OldSlabOffset; } private: @@ -272,26 +261,24 @@ unsigned SectionID, StringRef SectionName) { if (PrintAllocationRequests) - outs() << "allocateCodeSection(Size = " << Size << ", Alignment = " - << Alignment << ", SectionName = " << SectionName << ")\n"; + outs() << "allocateCodeSection(Size = " << Size + << ", Alignment = " << Alignment << ", SectionName = " << SectionName + << ")\n"; if (SecIDMap) (*SecIDMap)[SectionName] = SectionID; if (UsePreallocation) - return allocateFromSlab(Size, Alignment, true /* isCode */, - SectionName, SectionID); + return allocateFromSlab(Size, Alignment, true /* isCode */, SectionName, + SectionID); std::error_code EC; - sys::MemoryBlock MB = - sys::Memory::allocateMappedMemory(Size, nullptr, - sys::Memory::MF_READ | - sys::Memory::MF_WRITE, - EC); + sys::MemoryBlock MB = sys::Memory::allocateMappedMemory( + Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC); if (!MB.base()) report_fatal_error("MemoryManager allocation failed: " + EC.message()); FunctionMemory.push_back(SectionInfo(SectionName, MB, SectionID)); - return (uint8_t*)MB.base(); + return (uint8_t *)MB.base(); } uint8_t *TrivialMemoryManager::allocateDataSection(uintptr_t Size, @@ -300,8 +287,9 @@ StringRef SectionName, bool IsReadOnly) { if (PrintAllocationRequests) - outs() << "allocateDataSection(Size = " << Size << ", Alignment = " - << Alignment << ", SectionName = " << SectionName << ")\n"; + outs() << "allocateDataSection(Size = " << Size + << ", Alignment = " << Alignment << ", SectionName = " << SectionName + << ")\n"; if (SecIDMap) (*SecIDMap)[SectionName] = SectionID; @@ -311,15 +299,12 @@ SectionID); std::error_code EC; - sys::MemoryBlock MB = - sys::Memory::allocateMappedMemory(Size, nullptr, - sys::Memory::MF_READ | - sys::Memory::MF_WRITE, - EC); + sys::MemoryBlock MB = sys::Memory::allocateMappedMemory( + Size, nullptr, sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC); if (!MB.base()) report_fatal_error("MemoryManager allocation failed: " + EC.message()); DataMemory.push_back(SectionInfo(SectionName, MB, SectionID)); - return (uint8_t*)MB.base(); + return (uint8_t *)MB.base(); } static const char *ProgramName; @@ -363,7 +348,7 @@ ErrorAndExit("unable to read input: '" + EC.message() + "'"); Expected> MaybeObj( - ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); + ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); if (!MaybeObj) { std::string Buf; @@ -380,8 +365,7 @@ ObjectFile *SymbolObj = &Obj; if (LoadObjects) { // Load the object file - LoadedObjInfo = - Dyld.loadObject(Obj); + LoadedObjInfo = Dyld.loadObject(Obj); if (Dyld.hasError()) ErrorAndExit(Dyld.getErrorString()); @@ -445,7 +429,7 @@ 
Sec->getName(SecName); Address.SectionIndex = Sec->getIndex(); uint64_t SectionLoadAddress = - LoadedObjInfo->getSectionLoadAddress(*Sec); + LoadedObjInfo->getSectionLoadAddress(*Sec); if (SectionLoadAddress != 0) Addr += SectionLoadAddress - Sec->getAddress(); } else if (auto SecOrErr = Sym.getSection()) @@ -472,7 +456,8 @@ // Allocate a slab of memory upfront, if required. This is used if // we want to test small code models. if (static_cast(PreallocMemory) < 0) - report_fatal_error("Pre-allocated bytes of memory must be a positive integer."); + report_fatal_error( + "Pre-allocated bytes of memory must be a positive integer."); // FIXME: Limit the amount of memory that can be preallocated? if (PreallocMemory != 0) @@ -498,7 +483,7 @@ if (std::error_code EC = InputBuffer.getError()) ErrorAndExit("unable to read input: '" + EC.message() + "'"); Expected> MaybeObj( - ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); + ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); if (!MaybeObj) { std::string Buf; @@ -533,18 +518,17 @@ // Make sure the memory is executable. // setExecutable will call InvalidateInstructionCache. - if (auto EC = sys::Memory::protectMappedMemory(FM_MB, - sys::Memory::MF_READ | - sys::Memory::MF_EXEC)) + if (auto EC = sys::Memory::protectMappedMemory( + FM_MB, sys::Memory::MF_READ | sys::Memory::MF_EXEC)) ErrorAndExit("unable to mark function executable: '" + EC.message() + "'"); } // Dispatch to _main(). - errs() << "loaded '" << EntryPoint << "' at: " << (void*)MainAddress << "\n"; + errs() << "loaded '" << EntryPoint << "' at: " << (void *)MainAddress << "\n"; - int (*Main)(int, const char**) = - (int(*)(int,const char**)) uintptr_t(MainAddress); + int (*Main)(int, const char **) = + (int (*)(int, const char **))uintptr_t(MainAddress); std::vector Argv; // Use the name of the first input object module as argv[0] for the target. Argv.push_back(InputFileList[0].data()); @@ -555,12 +539,12 @@ } static int checkAllExpressions(RuntimeDyldChecker &Checker) { - for (const auto& CheckerFileName : CheckFiles) { + for (const auto &CheckerFileName : CheckFiles) { ErrorOr> CheckerFileBuf = MemoryBuffer::getFileOrSTDIN(CheckerFileName); if (std::error_code EC = CheckerFileBuf.getError()) - ErrorAndExit("unable to read input '" + CheckerFileName + "': " + - EC.message()); + ErrorAndExit("unable to read input '" + CheckerFileName + + "': " + EC.message()); if (!Checker.checkAllRulesInBuffer("# rtdyld-check:", CheckerFileBuf.get().get())) @@ -584,9 +568,9 @@ std::string FileName = SectionIDStr.substr(0, ComaIdx); std::string SectionName = SectionIDStr.substr(ComaIdx + 1); unsigned SectionID = - ExitOnErr(getSectionId(FileToSecIDMap, FileName, SectionName)); + ExitOnErr(getSectionId(FileToSecIDMap, FileName, SectionName)); - auto* OldAddr = Dyld.getSectionContent(SectionID).data(); + auto *OldAddr = Dyld.getSectionContent(SectionID).data(); std::string NewAddrStr = Mapping.substr(EqualsIdx + 1); uint64_t NewAddr; @@ -602,9 +586,10 @@ // Remaps section addresses for -verify mode. The following command line options // can be used to customize the layout of the memory within the phony target's // address space: -// -target-addr-start -- Specify where the phony target address range starts. -// -target-addr-end -- Specify where the phony target address range ends. -// -target-section-sep -- Specify how big a gap should be left between the +// -target-addr-start -- Specify where the phony target address range +// starts. 
-target-addr-end -- Specify where the phony target address +// range ends. -target-section-sep -- Specify how big a gap should be left +// between the // end of one section and the start of the next. // Defaults to zero. Set to something big // (e.g. 1 << 32) to stress-test stubs, GOTs, etc. @@ -614,12 +599,12 @@ TrivialMemoryManager &MemMgr) { // Set up a work list (section addr/size pairs). - typedef std::list WorklistT; + typedef std::list WorklistT; WorklistT Worklist; - for (const auto& CodeSection : MemMgr.FunctionMemory) + for (const auto &CodeSection : MemMgr.FunctionMemory) Worklist.push_back(&CodeSection); - for (const auto& DataSection : MemMgr.DataMemory) + for (const auto &DataSection : MemMgr.DataMemory) Worklist.push_back(&DataSection); // Keep an "already allocated" mapping of section target addresses to sizes. @@ -631,20 +616,19 @@ // Move the previously applied mappings (whether explicitly specified on the // command line, or implicitly set by RuntimeDyld) into the already-allocated // map. - for (WorklistT::iterator I = Worklist.begin(), E = Worklist.end(); - I != E;) { + for (WorklistT::iterator I = Worklist.begin(), E = Worklist.end(); I != E;) { WorklistT::iterator Tmp = I; ++I; auto LoadAddr = Dyld.getSectionLoadAddress((*Tmp)->SectionID); - if (LoadAddr != static_cast( - reinterpret_cast((*Tmp)->MB.base()))) { + if (LoadAddr != + static_cast(reinterpret_cast((*Tmp)->MB.base()))) { // A section will have a LoadAddr of 0 if it wasn't loaded for whatever // reason (e.g. zero byte COFF sections). Don't include those sections in // the allocation map. if (LoadAddr != 0) - AlreadyAllocated[LoadAddr] = (*Tmp)->MB.size(); + AlreadyAllocated[LoadAddr] = (*Tmp)->MB.allocatedSize(); Worklist.erase(Tmp); } } @@ -668,13 +652,14 @@ uint64_t NextSectionAddr = TargetAddrStart; for (const auto &Alloc : AlreadyAllocated) - if (NextSectionAddr + CurEntry->MB.size() + TargetSectionSep <= Alloc.first) + if (NextSectionAddr + CurEntry->MB.allocatedSize() + TargetSectionSep <= + Alloc.first) break; else NextSectionAddr = Alloc.first + Alloc.second + TargetSectionSep; Dyld.mapSectionAddress(CurEntry->MB.base(), NextSectionAddr); - AlreadyAllocated[NextSectionAddr] = CurEntry->MB.size(); + AlreadyAllocated[NextSectionAddr] = CurEntry->MB.allocatedSize(); } // Add dummy symbols to the memory manager. 
@@ -709,14 +694,14 @@ Triple TheTriple(Triple::normalize(TripleName)); std::string ErrorStr; const Target *TheTarget = - TargetRegistry::lookupTarget("", TheTriple, ErrorStr); + TargetRegistry::lookupTarget("", TheTriple, ErrorStr); if (!TheTarget) ErrorAndExit("Error accessing target '" + TripleName + "': " + ErrorStr); TripleName = TheTriple.getTriple(); std::unique_ptr STI( - TheTarget->createMCSubtargetInfo(TripleName, MCPU, "")); + TheTarget->createMCSubtargetInfo(TripleName, MCPU, "")); if (!STI) ErrorAndExit("Unable to create subtarget info!"); @@ -731,7 +716,7 @@ MCContext Ctx(MAI.get(), MRI.get(), nullptr); std::unique_ptr Disassembler( - TheTarget->createMCDisassembler(*STI, Ctx)); + TheTarget->createMCDisassembler(*STI, Ctx)); if (!Disassembler) ErrorAndExit("Unable to create disassembler!"); @@ -877,7 +862,7 @@ ErrorAndExit("unable to read input: '" + EC.message() + "'"); Expected> MaybeObj( - ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); + ObjectFile::createObjectFile((*InputBuffer)->getMemBufferRef())); if (!MaybeObj) { std::string Buf; @@ -940,11 +925,14 @@ case AC_Execute: return executeInput(); case AC_PrintDebugLineInfo: - return printLineInfoForInput(/* LoadObjects */ true,/* UseDebugObj */ true); + return printLineInfoForInput(/* LoadObjects */ true, + /* UseDebugObj */ true); case AC_PrintLineInfo: - return printLineInfoForInput(/* LoadObjects */ true,/* UseDebugObj */false); + return printLineInfoForInput(/* LoadObjects */ true, + /* UseDebugObj */ false); case AC_PrintObjectLineInfo: - return printLineInfoForInput(/* LoadObjects */false,/* UseDebugObj */false); + return printLineInfoForInput(/* LoadObjects */ false, + /* UseDebugObj */ false); case AC_Verify: return linkAndVerify(); } Index: unittests/Support/MemoryTest.cpp =================================================================== --- unittests/Support/MemoryTest.cpp +++ unittests/Support/MemoryTest.cpp @@ -59,15 +59,16 @@ switch (RequestedFlags) { case Memory::MF_READ: case Memory::MF_WRITE: - case Memory::MF_READ|Memory::MF_WRITE: - return Memory::MF_READ|Memory::MF_WRITE; - case Memory::MF_READ|Memory::MF_EXEC: - case Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC: + case Memory::MF_READ | Memory::MF_WRITE: + return Memory::MF_READ | Memory::MF_WRITE; + case Memory::MF_READ | Memory::MF_EXEC: + case Memory::MF_READ | Memory::MF_WRITE | Memory::MF_EXEC: case Memory::MF_EXEC: - return Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC; + return Memory::MF_READ | Memory::MF_WRITE | Memory::MF_EXEC; } - // Default in case values are added to the enum, as required by some compilers - return Memory::MF_READ|Memory::MF_WRITE; + // Default in case values are added to the enum, as required by some + // compilers + return Memory::MF_READ | Memory::MF_WRITE; } // Returns true if the memory blocks overlap @@ -76,31 +77,32 @@ return true; if (M1.base() > M2.base()) - return (unsigned char *)M2.base() + M2.size() > M1.base(); + return (unsigned char *)M2.base() + M2.allocatedSize() > M1.base(); - return (unsigned char *)M1.base() + M1.size() > M2.base(); + return (unsigned char *)M1.base() + M1.allocatedSize() > M2.base(); } unsigned Flags; - size_t PageSize; + size_t PageSize; }; // MPROTECT prevents W+X mmaps -#define CHECK_UNSUPPORTED() \ - do { \ - if ((Flags & Memory::MF_WRITE) && (Flags & Memory::MF_EXEC) && \ - IsMPROTECT()) \ - return; \ +#define CHECK_UNSUPPORTED() \ + do { \ + if ((Flags & Memory::MF_WRITE) && (Flags & Memory::MF_EXEC) && \ + IsMPROTECT()) \ + return; \ } while (0) 
TEST_P(MappedMemoryTest, AllocAndRelease) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), nullptr, Flags,EC); + MemoryBlock M1 = + Memory::allocateMappedMemory(sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(sizeof(int), M1.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(sizeof(int), M1.allocatedSize()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); } @@ -116,7 +118,7 @@ // returned, if large pages aren't available. EXPECT_NE((void *)nullptr, M1.base()); - EXPECT_LE(sizeof(int), M1.size()); + EXPECT_LE(sizeof(int), M1.allocatedSize()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); } @@ -131,12 +133,12 @@ MemoryBlock M3 = Memory::allocateMappedMemory(32, nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(16U, M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(64U, M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(32U, M3.allocatedSize()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -146,27 +148,27 @@ EXPECT_FALSE(Memory::releaseMappedMemory(M3)); MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(16U, M4.size()); + EXPECT_NE((void *)nullptr, M4.base()); + EXPECT_LE(16U, M4.allocatedSize()); EXPECT_FALSE(Memory::releaseMappedMemory(M4)); EXPECT_FALSE(Memory::releaseMappedMemory(M2)); } TEST_P(MappedMemoryTest, BasicWrite) { // This test applies only to readable and writeable combinations - if (Flags && - !((Flags & Memory::MF_READ) && (Flags & Memory::MF_WRITE))) + if (Flags && !((Flags & Memory::MF_READ) && (Flags & Memory::MF_WRITE))) return; CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), nullptr, Flags,EC); + MemoryBlock M1 = + Memory::allocateMappedMemory(sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(sizeof(int), M1.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(sizeof(int), M1.allocatedSize()); - int *a = (int*)M1.base(); + int *a = (int *)M1.base(); *a = 1; EXPECT_EQ(1, *a); @@ -175,42 +177,41 @@ TEST_P(MappedMemoryTest, MultipleWrite) { // This test applies only to readable and writeable combinations - if (Flags && - !((Flags & Memory::MF_READ) && (Flags & Memory::MF_WRITE))) + if (Flags && !((Flags & Memory::MF_READ) && (Flags & Memory::MF_WRITE))) return; CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock M1 = Memory::allocateMappedMemory(sizeof(int), nullptr, Flags, - EC); + MemoryBlock M1 = + Memory::allocateMappedMemory(sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), nullptr, Flags, - EC); + MemoryBlock M2 = + Memory::allocateMappedMemory(8 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), nullptr, Flags, - EC); + MemoryBlock M3 = + Memory::allocateMappedMemory(4 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); EXPECT_FALSE(doesOverlap(M1, M2)); 
EXPECT_FALSE(doesOverlap(M2, M3)); EXPECT_FALSE(doesOverlap(M1, M3)); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(1U * sizeof(int), M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(8U * sizeof(int), M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(4U * sizeof(int), M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(1U * sizeof(int), M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(8U * sizeof(int), M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(4U * sizeof(int), M3.allocatedSize()); - int *x = (int*)M1.base(); + int *x = (int *)M1.base(); *x = 1; - int *y = (int*)M2.base(); + int *y = (int *)M2.base(); for (int i = 0; i < 8; i++) { y[i] = i; } - int *z = (int*)M3.base(); + int *z = (int *)M3.base(); *z = 42; EXPECT_EQ(1, *x); @@ -220,12 +221,12 @@ EXPECT_FALSE(Memory::releaseMappedMemory(M1)); EXPECT_FALSE(Memory::releaseMappedMemory(M3)); - MemoryBlock M4 = Memory::allocateMappedMemory(64 * sizeof(int), nullptr, - Flags, EC); + MemoryBlock M4 = + Memory::allocateMappedMemory(64 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(64U * sizeof(int), M4.size()); - x = (int*)M4.base(); + EXPECT_NE((void *)nullptr, M4.base()); + EXPECT_LE(64U * sizeof(int), M4.allocatedSize()); + x = (int *)M4.base(); *x = 4; EXPECT_EQ(4, *x); EXPECT_FALSE(Memory::releaseMappedMemory(M4)); @@ -244,22 +245,22 @@ return; std::error_code EC; - MemoryBlock M1 = Memory::allocateMappedMemory(2 * sizeof(int), nullptr, Flags, - EC); + MemoryBlock M1 = + Memory::allocateMappedMemory(2 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - MemoryBlock M2 = Memory::allocateMappedMemory(8 * sizeof(int), nullptr, Flags, - EC); + MemoryBlock M2 = + Memory::allocateMappedMemory(8 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - MemoryBlock M3 = Memory::allocateMappedMemory(4 * sizeof(int), nullptr, Flags, - EC); + MemoryBlock M3 = + Memory::allocateMappedMemory(4 * sizeof(int), nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(2U * sizeof(int), M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(8U * sizeof(int), M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(4U * sizeof(int), M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(2U * sizeof(int), M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(8U * sizeof(int), M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(4U * sizeof(int), M3.allocatedSize()); EXPECT_FALSE(Memory::protectMappedMemory(M1, getTestableEquivalent(Flags))); EXPECT_FALSE(Memory::protectMappedMemory(M2, getTestableEquivalent(Flags))); @@ -269,13 +270,13 @@ EXPECT_FALSE(doesOverlap(M2, M3)); EXPECT_FALSE(doesOverlap(M1, M3)); - int *x = (int*)M1.base(); + int *x = (int *)M1.base(); *x = 1; - int *y = (int*)M2.base(); + int *y = (int *)M2.base(); for (unsigned int i = 0; i < 8; i++) { y[i] = i; } - int *z = (int*)M3.base(); + int *z = (int *)M3.base(); *z = 42; EXPECT_EQ(1, *x); @@ -288,11 +289,11 @@ MemoryBlock M4 = Memory::allocateMappedMemory(16, nullptr, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M4.base()); - EXPECT_LE(16U, M4.size()); + EXPECT_NE((void *)nullptr, M4.base()); + EXPECT_LE(16U, M4.allocatedSize()); EXPECT_EQ(std::error_code(), Memory::protectMappedMemory(M4, getTestableEquivalent(Flags))); - x = 
(int*)M4.base(); + x = (int *)M4.base(); *x = 4; EXPECT_EQ(4, *x); EXPECT_FALSE(Memory::releaseMappedMemory(M4)); @@ -309,12 +310,12 @@ MemoryBlock M3 = Memory::allocateMappedMemory(32, &M2, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(16U, M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(64U, M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(32U, M3.allocatedSize()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -328,7 +329,7 @@ TEST_P(MappedMemoryTest, DuplicateNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near((void*)(3*PageSize), 16); + MemoryBlock Near((void *)(3 * PageSize), 16); MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); @@ -336,12 +337,12 @@ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(16U, M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(64U, M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(32U, M3.allocatedSize()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); EXPECT_FALSE(Memory::releaseMappedMemory(M3)); @@ -359,12 +360,12 @@ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(16U, M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(64U, M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(32U, M3.allocatedSize()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -378,7 +379,7 @@ TEST_P(MappedMemoryTest, ZeroSizeNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock Near((void*)(4*PageSize), 0); + MemoryBlock Near((void *)(4 * PageSize), 0); MemoryBlock M1 = Memory::allocateMappedMemory(16, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); MemoryBlock M2 = Memory::allocateMappedMemory(64, &Near, Flags, EC); @@ -386,12 +387,12 @@ MemoryBlock M3 = Memory::allocateMappedMemory(32, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(16U, M1.size()); - EXPECT_NE((void*)nullptr, M2.base()); - EXPECT_LE(64U, M2.size()); - EXPECT_NE((void*)nullptr, M3.base()); - EXPECT_LE(32U, M3.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(16U, M1.allocatedSize()); + EXPECT_NE((void *)nullptr, M2.base()); + EXPECT_LE(64U, M2.allocatedSize()); + EXPECT_NE((void *)nullptr, M3.base()); + EXPECT_LE(32U, M3.allocatedSize()); EXPECT_FALSE(doesOverlap(M1, M2)); EXPECT_FALSE(doesOverlap(M2, M3)); @@ -405,29 +406,26 @@ TEST_P(MappedMemoryTest, UnalignedNear) { CHECK_UNSUPPORTED(); std::error_code EC; - MemoryBlock 
Near((void*)(2*PageSize+5), 0); + MemoryBlock Near((void *)(2 * PageSize + 5), 0); MemoryBlock M1 = Memory::allocateMappedMemory(15, &Near, Flags, EC); EXPECT_EQ(std::error_code(), EC); - EXPECT_NE((void*)nullptr, M1.base()); - EXPECT_LE(sizeof(int), M1.size()); + EXPECT_NE((void *)nullptr, M1.base()); + EXPECT_LE(sizeof(int), M1.allocatedSize()); EXPECT_FALSE(Memory::releaseMappedMemory(M1)); } // Note that Memory::MF_WRITE is not supported exclusively across // operating systems and architectures and can imply MF_READ|MF_WRITE -unsigned MemoryFlags[] = { - Memory::MF_READ, - Memory::MF_WRITE, - Memory::MF_READ|Memory::MF_WRITE, - Memory::MF_EXEC, - Memory::MF_READ|Memory::MF_EXEC, - Memory::MF_READ|Memory::MF_WRITE|Memory::MF_EXEC - }; +unsigned MemoryFlags[] = {Memory::MF_READ, + Memory::MF_WRITE, + Memory::MF_READ | Memory::MF_WRITE, + Memory::MF_EXEC, + Memory::MF_READ | Memory::MF_EXEC, + Memory::MF_READ | Memory::MF_WRITE | Memory::MF_EXEC}; -INSTANTIATE_TEST_CASE_P(AllocationTests, - MappedMemoryTest, - ::testing::ValuesIn(MemoryFlags),); +INSTANTIATE_TEST_CASE_P(AllocationTests, MappedMemoryTest, + ::testing::ValuesIn(MemoryFlags), ); -} // anonymous namespace +} // anonymous namespace
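
Not part of the patch: a minimal caller-side sketch of the renamed API documented in the Memory.h hunks above. It uses only entry points the patched header declares (allocateMappedMemory, allocatedSize, releaseMappedMemory); the demo function itself is hypothetical.

#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Memory.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>

using namespace llvm;

// Hypothetical caller: ask for 100 bytes, observe that the mapping is rounded
// up to the allocation granularity, use the whole block, then release it.
static void demoAllocatedSize() {
  std::error_code EC;
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      100, /*NearBlock=*/nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  if (EC)
    report_fatal_error("allocation failed: " + EC.message());

  // allocatedSize() is what was actually mapped, always >= the 100 bytes
  // requested; the old size() accessor no longer exists after this patch.
  errs() << "requested 100 bytes, got " << MB.allocatedSize() << "\n";
  std::memset(MB.base(), 0, MB.allocatedSize());

  if (std::error_code E = sys::Memory::releaseMappedMemory(MB))
    report_fatal_error("release failed: " + E.message());
}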
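
The Unix allocateMappedMemory hunk above now maps whole pages via NumPages = (NumBytes + PageSize - 1) / PageSize. A standalone sketch of that ceiling division (not taken from the tree), with a few worked values:

#include <cassert>
#include <cstddef>

// Same rounding as the patched Unix/Memory.inc: round a byte count up to a
// whole number of pages so the slack can be reported via allocatedSize().
static size_t roundUpToPages(size_t NumBytes, size_t PageSize) {
  size_t NumPages = (NumBytes + PageSize - 1) / PageSize; // ceiling division
  return NumPages * PageSize;
}

int main() {
  assert(roundUpToPages(1, 4096) == 4096);    // a single byte still maps one page
  assert(roundUpToPages(4096, 4096) == 4096); // exact multiples are unchanged
  assert(roundUpToPages(4097, 4096) == 8192); // one byte over adds a full page
  return 0;
}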
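
FileOutputBuffer's InMemoryBuffer in the hunks above keeps the requested size (BufferSize) next to its OwningMemoryBlock because the block itself only reports allocatedSize(). The sketch below shows the same pattern for other callers; SizedAllocation and allocateExactly are illustrative names, not LLVM API.

#include "llvm/Support/Memory.h"
#include <cstddef>

using namespace llvm;

// Illustrative only: pair the owning block with the size the caller asked for,
// since MemoryBlock no longer remembers anything but the mapped size.
struct SizedAllocation {
  sys::OwningMemoryBlock Block;
  size_t RequestedSize = 0;
};

static SizedAllocation allocateExactly(size_t NumBytes, std::error_code &EC) {
  sys::MemoryBlock MB = sys::Memory::allocateMappedMemory(
      NumBytes, /*NearBlock=*/nullptr,
      sys::Memory::MF_READ | sys::Memory::MF_WRITE, EC);
  SizedAllocation Result;
  if (!EC) {
    Result.Block = sys::OwningMemoryBlock(MB); // released in the destructor
    Result.RequestedSize = NumBytes;           // what the caller asked for
  }
  return Result;
}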