diff --git a/bolt/include/bolt/Core/BinarySection.h b/bolt/include/bolt/Core/BinarySection.h
--- a/bolt/include/bolt/Core/BinarySection.h
+++ b/bolt/include/bolt/Core/BinarySection.h
@@ -157,7 +157,7 @@
   BinarySection(BinaryContext &BC, SectionRef Section)
       : BC(BC), Name(getName(Section)), Section(Section),
         Contents(getContents(Section)), Address(Section.getAddress()),
-        Size(Section.getSize()), Alignment(Section.getAlignment()),
+        Size(Section.getSize()), Alignment(Section.getAlignment().value()),
         OutputName(Name), SectionNumber(++Count) {
     if (isELF()) {
       ELFType = ELFSectionRef(Section).getType();
diff --git a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
--- a/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
+++ b/llvm/include/llvm/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.h
@@ -60,10 +60,9 @@
                                unsigned SectionID, StringRef SectionName,
                                bool IsReadOnly) override;
 
-  void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
-                              uintptr_t RODataSize, uint32_t RODataAlign,
-                              uintptr_t RWDataSize,
-                              uint32_t RWDataAlign) override;
+  void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+                              uintptr_t RODataSize, Align RODataAlign,
+                              uintptr_t RWDataSize, Align RWDataAlign) override;
 
   bool needsToReserveAllocationSpace() override;
 
diff --git a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
--- a/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
+++ b/llvm/include/llvm/ExecutionEngine/RuntimeDyld.h
@@ -134,11 +134,10 @@
   ///
   /// Note that by default the callback is disabled. To enable it
   /// redefine the method needsToReserveAllocationSpace to return true.
-  virtual void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
-                                      uintptr_t RODataSize,
-                                      uint32_t RODataAlign,
+  virtual void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+                                      uintptr_t RODataSize, Align RODataAlign,
                                       uintptr_t RWDataSize,
-                                      uint32_t RWDataAlign) {}
+                                      Align RWDataAlign) {}
 
   /// Override to return true to enable the reserveAllocationSpace callback.
   virtual bool needsToReserveAllocationSpace() { return false; }
diff --git a/llvm/include/llvm/Object/ObjectFile.h b/llvm/include/llvm/Object/ObjectFile.h
--- a/llvm/include/llvm/Object/ObjectFile.h
+++ b/llvm/include/llvm/Object/ObjectFile.h
@@ -99,8 +99,8 @@
   uint64_t getSize() const;
   Expected<StringRef> getContents() const;
 
-  /// Get the alignment of this section as the actual value (not log 2).
-  uint64_t getAlignment() const;
+  /// Get the alignment of this section.
+  Align getAlignment() const;
 
   bool isCompressed() const;
   /// Whether this section contains instructions.
@@ -481,8 +481,9 @@
   return StringRef(reinterpret_cast<const char *>(Res->data()), Res->size());
 }
 
-inline uint64_t SectionRef::getAlignment() const {
-  return OwningObject->getSectionAlignment(SectionPimpl);
+inline Align SectionRef::getAlignment() const {
+  return MaybeAlign(OwningObject->getSectionAlignment(SectionPimpl))
+      .valueOrOne();
 }
 
 inline bool SectionRef::isCompressed() const {
diff --git a/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp b/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
--- a/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
+++ b/llvm/lib/ExecutionEngine/Orc/EPCGenericRTDyldMemoryManager.cpp
@@ -94,8 +94,8 @@
 }
 
 void EPCGenericRTDyldMemoryManager::reserveAllocationSpace(
-    uintptr_t CodeSize, uint32_t CodeAlign, uintptr_t RODataSize,
-    uint32_t RODataAlign, uintptr_t RWDataSize, uint32_t RWDataAlign) {
+    uintptr_t CodeSize, Align CodeAlign, uintptr_t RODataSize,
+    Align RODataAlign, uintptr_t RWDataSize, Align RWDataAlign) {
 
   {
     std::lock_guard<std::mutex> Lock(M);
@@ -103,15 +103,15 @@
     if (!ErrMsg.empty())
       return;
 
-    if (!isPowerOf2_32(CodeAlign) || CodeAlign > EPC.getPageSize()) {
+    if (CodeAlign > EPC.getPageSize()) {
       ErrMsg = "Invalid code alignment in reserveAllocationSpace";
       return;
     }
-    if (!isPowerOf2_32(RODataAlign) || RODataAlign > EPC.getPageSize()) {
+    if (RODataAlign > EPC.getPageSize()) {
       ErrMsg = "Invalid ro-data alignment in reserveAllocationSpace";
       return;
     }
-    if (!isPowerOf2_32(RWDataAlign) || RWDataAlign > EPC.getPageSize()) {
+    if (RWDataAlign > EPC.getPageSize()) {
       ErrMsg = "Invalid rw-data alignment in reserveAllocationSpace";
       return;
     }
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyld.cpp
@@ -191,11 +191,9 @@
   // and pass this information to the memory manager
   if (MemMgr.needsToReserveAllocationSpace()) {
     uint64_t CodeSize = 0, RODataSize = 0, RWDataSize = 0;
-    uint32_t CodeAlign = 1, RODataAlign = 1, RWDataAlign = 1;
-    if (auto Err = computeTotalAllocSize(Obj,
-                                         CodeSize, CodeAlign,
-                                         RODataSize, RODataAlign,
-                                         RWDataSize, RWDataAlign))
+    Align CodeAlign, RODataAlign, RWDataAlign;
+    if (auto Err = computeTotalAllocSize(Obj, CodeSize, CodeAlign, RODataSize,
+                                         RODataAlign, RWDataSize, RWDataAlign))
       return std::move(Err);
     MemMgr.reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
                                   RWDataSize, RWDataAlign);
@@ -463,13 +461,10 @@
 // assuming that all sections are allocated with the given alignment
 static uint64_t
 computeAllocationSizeForSections(std::vector<uint64_t> &SectionSizes,
-                                 uint64_t Alignment) {
+                                 Align Alignment) {
   uint64_t TotalSize = 0;
-  for (uint64_t SectionSize : SectionSizes) {
-    uint64_t AlignedSize =
-        (SectionSize + Alignment - 1) / Alignment * Alignment;
-    TotalSize += AlignedSize;
-  }
+  for (uint64_t SectionSize : SectionSizes)
+    TotalSize += alignTo(SectionSize, Alignment);
   return TotalSize;
 }
 
@@ -537,13 +532,10 @@
 
 // Compute an upper bound of the memory size that is required to load all
 // sections
-Error RuntimeDyldImpl::computeTotalAllocSize(const ObjectFile &Obj,
-                                             uint64_t &CodeSize,
-                                             uint32_t &CodeAlign,
-                                             uint64_t &RODataSize,
-                                             uint32_t &RODataAlign,
-                                             uint64_t &RWDataSize,
-                                             uint32_t &RWDataAlign) {
+Error RuntimeDyldImpl::computeTotalAllocSize(
+    const ObjectFile &Obj, uint64_t &CodeSize, Align &CodeAlign,
+    uint64_t &RODataSize, Align &RODataAlign, uint64_t &RWDataSize,
+    Align &RWDataAlign) {
   // Compute the size of all sections required for execution
   std::vector<uint64_t> CodeSectionSizes;
   std::vector<uint64_t> ROSectionSizes;
@@ -560,8 +552,7 @@
     // Consider only the sections that are required to be loaded for execution
     if (IsRequired) {
       uint64_t DataSize = Section.getSize();
-      uint64_t Alignment64 = Section.getAlignment();
-      unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
+      Align Alignment = Section.getAlignment();
       bool IsCode = Section.isText();
       bool IsReadOnly = isReadOnlyData(Section);
       bool IsTLS = isTLS(Section);
@@ -577,7 +568,7 @@
       if (Name == ".eh_frame")
         PaddingSize += 4;
       if (StubBufSize != 0)
-        PaddingSize += getStubAlignment() - 1;
+        PaddingSize += getStubAlignment().value() - 1;
 
       uint64_t SectionSize = DataSize + PaddingSize + StubBufSize;
 
@@ -610,12 +601,12 @@
   // single GOT entry.
   if (unsigned GotSize = computeGOTSize(Obj)) {
     RWSectionSizes.push_back(GotSize);
-    RWDataAlign = std::max(RWDataAlign, getGOTEntrySize());
+    RWDataAlign = std::max(RWDataAlign, Align(getGOTEntrySize()));
   }
 
   // Compute the size of all common symbols
   uint64_t CommonSize = 0;
-  uint32_t CommonAlign = 1;
+  Align CommonAlign;
   for (symbol_iterator I = Obj.symbol_begin(), E = Obj.symbol_end(); I != E;
        ++I) {
     Expected<uint32_t> FlagsOrErr = I->getFlags();
@@ -625,12 +616,12 @@
     if (*FlagsOrErr & SymbolRef::SF_Common) {
       // Add the common symbols to a list. We'll allocate them all below.
       uint64_t Size = I->getCommonSize();
-      uint32_t Align = I->getAlignment();
+      Align Alignment = Align(I->getAlignment());
       // If this is the first common symbol, use its alignment as the alignment
       // for the common symbols section.
       if (CommonSize == 0)
-        CommonAlign = Align;
-      CommonSize = alignTo(CommonSize, Align) + Size;
+        CommonAlign = Alignment;
+      CommonSize = alignTo(CommonSize, Alignment) + Size;
     }
   }
   if (CommonSize != 0) {
@@ -706,14 +697,13 @@
 
   // Get section data size and alignment
   uint64_t DataSize = Section.getSize();
-  uint64_t Alignment64 = Section.getAlignment();
+  Align Alignment = Section.getAlignment();
 
   // Add stubbuf size alignment
-  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
-  unsigned StubAlignment = getStubAlignment();
-  unsigned EndAlignment = (DataSize | Alignment) & -(DataSize | Alignment);
+  Align StubAlignment = getStubAlignment();
+  Align EndAlignment = commonAlignment(Alignment, DataSize);
   if (StubAlignment > EndAlignment)
-    StubBufSize += StubAlignment - EndAlignment;
+    StubBufSize += StubAlignment.value() - EndAlignment.value();
 
   return StubBufSize;
 }
@@ -812,9 +802,8 @@
                                  const SectionRef &Section,
                                  bool IsCode) {
   StringRef data;
-  uint64_t Alignment64 = Section.getAlignment();
+  Align Alignment = Section.getAlignment();
 
-  unsigned Alignment = (unsigned)Alignment64 & 0xffffffffL;
   unsigned PaddingSize = 0;
   unsigned StubBufSize = 0;
   bool IsRequired = isRequiredForExecution(Section);
@@ -824,11 +813,6 @@
   bool IsTLS = isTLS(Section);
   uint64_t DataSize = Section.getSize();
 
-  // An alignment of 0 (at least with ELF) is identical to an alignment of 1,
-  // while being more "polite". Other formats do not support 0-aligned sections
-  // anyway, so we should guarantee that the alignment is always at least 1.
-  Alignment = std::max(1u, Alignment);
-
   Expected<StringRef> NameOrErr = Section.getName();
   if (!NameOrErr)
     return NameOrErr.takeError();
@@ -865,7 +849,7 @@
     // section is remapped.
     if (StubBufSize != 0) {
       Alignment = std::max(Alignment, getStubAlignment());
-      PaddingSize += getStubAlignment() - 1;
+      PaddingSize += getStubAlignment().value() - 1;
     }
 
     // Some sections, such as debug info, don't need to be loaded for execution.
@@ -875,15 +859,16 @@
     if (!Allocate)
       Allocate = 1;
     if (IsTLS) {
-      auto TLSSection =
-          MemMgr.allocateTLSSection(Allocate, Alignment, SectionID, Name);
+      auto TLSSection = MemMgr.allocateTLSSection(Allocate, Alignment.value(),
+                                                  SectionID, Name);
       Addr = TLSSection.InitializationImage;
       LoadAddress = TLSSection.Offset;
     } else if (IsCode) {
-      Addr = MemMgr.allocateCodeSection(Allocate, Alignment, SectionID, Name);
+      Addr = MemMgr.allocateCodeSection(Allocate, Alignment.value(), SectionID,
+                                        Name);
     } else {
-      Addr = MemMgr.allocateDataSection(Allocate, Alignment, SectionID, Name,
-                                        IsReadOnly);
+      Addr = MemMgr.allocateDataSection(Allocate, Alignment.value(), SectionID,
+                                        Name, IsReadOnly);
     }
     if (!Addr)
       report_fatal_error("Unable to allocate section memory!");
@@ -903,7 +888,7 @@
     // Align DataSize to stub alignment if we have any stubs (PaddingSize will
     // have been increased above to account for this).
     if (StubBufSize > 0)
-      DataSize &= -(uint64_t)getStubAlignment();
+      DataSize &= -(uint64_t)getStubAlignment().value();
   }
 
   LLVM_DEBUG(dbgs() << "emitSection SectionID: " << SectionID << " Name: "
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.h
@@ -79,11 +79,11 @@
       return 0;
   }
 
-  unsigned getStubAlignment() override {
+  Align getStubAlignment() override {
     if (Arch == Triple::systemz)
-      return 8;
+      return Align(8);
     else
-      return 1;
+      return Align(1);
   }
 
   void setMipsABI(const ObjectFile &Obj) override;
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp
@@ -1730,10 +1730,8 @@
       LLVM_DEBUG(dbgs() << " Create a new stub function\n");
 
       uintptr_t BaseAddress = uintptr_t(Section.getAddress());
-      uintptr_t StubAlignment = getStubAlignment();
       StubAddress =
-          (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
-          -StubAlignment;
+          alignTo(BaseAddress + Section.getStubOffset(), getStubAlignment());
       unsigned StubOffset = StubAddress - BaseAddress;
 
       Stubs[Value] = StubOffset;
@@ -1784,10 +1782,8 @@
       LLVM_DEBUG(dbgs() << " Create a new stub function\n");
 
       uintptr_t BaseAddress = uintptr_t(Section->getAddress());
-      uintptr_t StubAlignment = getStubAlignment();
-      StubAddress =
-          (BaseAddress + Section->getStubOffset() + StubAlignment - 1) &
-          -StubAlignment;
+      StubAddress = alignTo(BaseAddress + Section->getStubOffset(),
+                            getStubAlignment());
       unsigned StubOffset = StubAddress - BaseAddress;
       Stubs[Value] = StubOffset;
       createStubFunction((uint8_t *)StubAddress);
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/RuntimeDyldImpl.h
@@ -312,7 +312,7 @@
   NotifyStubEmittedFunction NotifyStubEmitted;
 
   virtual unsigned getMaxStubSize() const = 0;
-  virtual unsigned getStubAlignment() = 0;
+  virtual Align getStubAlignment() = 0;
 
   bool HasError;
   std::string ErrorStr;
@@ -417,10 +417,10 @@
 
   // Compute an upper bound of the memory that is required to load all
   // sections
-  Error computeTotalAllocSize(const ObjectFile &Obj,
-                              uint64_t &CodeSize, uint32_t &CodeAlign,
-                              uint64_t &RODataSize, uint32_t &RODataAlign,
-                              uint64_t &RWDataSize, uint32_t &RWDataAlign);
+  Error computeTotalAllocSize(const ObjectFile &Obj, uint64_t &CodeSize,
+                              Align &CodeAlign, uint64_t &RODataSize,
+                              Align &RODataAlign, uint64_t &RWDataSize,
+                              Align &RWDataAlign);
 
   // Compute GOT size
   unsigned computeGOTSize(const ObjectFile &Obj);
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFAArch64.h
@@ -92,7 +92,7 @@
       : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_ARM64_ADDR64),
        ImageBase(0) {}
 
-  unsigned getStubAlignment() override { return 8; }
+  Align getStubAlignment() override { return Align(8); }
 
   unsigned getMaxStubSize() const override { return 20; }
 
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFI386.h
@@ -31,7 +31,7 @@
     return 8; // 2-byte jmp instruction + 32-bit relative address + 2 byte pad
   }
 
-  unsigned getStubAlignment() override { return 1; }
+  Align getStubAlignment() override { return Align(1); }
 
   Expected<object::relocation_iterator>
   processRelocationRef(unsigned SectionID,
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFThumb.h
@@ -53,7 +53,7 @@
     return 16; // 8-byte load instructions, 4-byte jump, 4-byte padding
   }
 
-  unsigned getStubAlignment() override { return 1; }
+  Align getStubAlignment() override { return Align(1); }
 
   Expected<object::relocation_iterator>
   processRelocationRef(unsigned SectionID,
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldCOFFX86_64.h
@@ -59,7 +59,7 @@
       : RuntimeDyldCOFF(MM, Resolver, 8, COFF::IMAGE_REL_AMD64_ADDR64),
        ImageBase(0) {}
 
-  unsigned getStubAlignment() override { return 1; }
+  Align getStubAlignment() override { return Align(1); }
 
   // 2-byte jmp instruction + 32-bit relative address + 64-bit absolute jump
   unsigned getMaxStubSize() const override { return 14; }
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOAArch64.h
@@ -28,7 +28,7 @@
 
   unsigned getMaxStubSize() const override { return 8; }
 
-  unsigned getStubAlignment() override { return 8; }
+  Align getStubAlignment() override { return Align(8); }
 
   /// Extract the addend encoded in the instruction / memory location.
   Expected<int64_t> decodeAddend(const RelocationEntry &RE) const {
@@ -453,13 +453,13 @@
     // FIXME: There must be a better way to do this then to check and fix the
     // alignment every time!!!
     uintptr_t BaseAddress = uintptr_t(Section.getAddress());
-    uintptr_t StubAlignment = getStubAlignment();
+    uintptr_t StubAlignment = getStubAlignment().value();
     uintptr_t StubAddress =
         (BaseAddress + Section.getStubOffset() + StubAlignment - 1) &
         -StubAlignment;
     unsigned StubOffset = StubAddress - BaseAddress;
     Stubs[Value] = StubOffset;
-    assert(((StubAddress % getStubAlignment()) == 0) &&
+    assert(isAligned(getStubAlignment(), StubAddress) &&
           "GOT entry not aligned");
     RelocationEntry GOTRE(RE.SectionID, StubOffset,
                          MachO::ARM64_RELOC_UNSIGNED, Value.Offset,
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOARM.h
@@ -30,7 +30,7 @@
 
   unsigned getMaxStubSize() const override { return 8; }
 
-  unsigned getStubAlignment() override { return 4; }
+  Align getStubAlignment() override { return Align(4); }
 
   Expected<JITSymbolFlags> getJITSymbolFlags(const SymbolRef &SR) override {
     auto Flags = RuntimeDyldImpl::getJITSymbolFlags(SR);
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOI386.h
@@ -27,7 +27,7 @@
 
   unsigned getMaxStubSize() const override { return 0; }
 
-  unsigned getStubAlignment() override { return 1; }
+  Align getStubAlignment() override { return Align(1); }
 
   Expected<relocation_iterator>
   processRelocationRef(unsigned SectionID, relocation_iterator RelI,
diff --git a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
--- a/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
+++ b/llvm/lib/ExecutionEngine/RuntimeDyld/Targets/RuntimeDyldMachOX86_64.h
@@ -27,7 +27,7 @@
 
   unsigned getMaxStubSize() const override { return 8; }
 
-  unsigned getStubAlignment() override { return 8; }
+  Align getStubAlignment() override { return Align(8); }
 
   Expected<relocation_iterator>
   processRelocationRef(unsigned SectionID, relocation_iterator RelI,
diff --git a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
--- a/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
+++ b/llvm/tools/dsymutil/DwarfLinkerForBinary.cpp
@@ -542,7 +542,7 @@
     // place.
     SectionToOffsetInDwarf[SectionKind] += Section.getSize();
     Streamer->emitSwiftReflectionSection(SectionKind, *SectionContents,
-                                         Section.getAlignment(),
+                                         Section.getAlignment().value(),
                                          Section.getSize());
   }
 }
diff --git a/llvm/tools/lli/ForwardingMemoryManager.h b/llvm/tools/lli/ForwardingMemoryManager.h
--- a/llvm/tools/lli/ForwardingMemoryManager.h
+++ b/llvm/tools/lli/ForwardingMemoryManager.h
@@ -43,10 +43,10 @@
                                      IsReadOnly);
   }
 
-  void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
-                              uintptr_t RODataSize, uint32_t RODataAlign,
+  void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+                              uintptr_t RODataSize, Align RODataAlign,
                               uintptr_t RWDataSize,
-                              uint32_t RWDataAlign) override {
+                              Align RWDataAlign) override {
     MemMgr->reserveAllocationSpace(CodeSize, CodeAlign, RODataSize, RODataAlign,
                                    RWDataSize, RWDataAlign);
   }
diff --git a/llvm/unittests/ExecutionEngine/MCJIT/MCJITCAPITest.cpp b/llvm/unittests/ExecutionEngine/MCJIT/MCJITCAPITest.cpp
--- a/llvm/unittests/ExecutionEngine/MCJIT/MCJITCAPITest.cpp
+++ b/llvm/unittests/ExecutionEngine/MCJIT/MCJITCAPITest.cpp
@@ -86,10 +86,10 @@
 
   bool needsToReserveAllocationSpace() override { return true; }
 
-  void reserveAllocationSpace(uintptr_t CodeSize, uint32_t CodeAlign,
-                              uintptr_t DataSizeRO, uint32_t RODataAlign,
+  void reserveAllocationSpace(uintptr_t CodeSize, Align CodeAlign,
+                              uintptr_t DataSizeRO, Align RODataAlign,
                               uintptr_t DataSizeRW,
-                              uint32_t RWDataAlign) override {
+                              Align RWDataAlign) override {
     ReservedCodeSize = CodeSize;
     ReservedDataSizeRO = DataSizeRO;
     ReservedDataSizeRW = DataSizeRW;
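Note (not part of the patch): a minimal standalone sketch of the llvm/Support/Alignment.h helpers this diff switches to: MaybeAlign::valueOrOne(), alignTo(), isAligned(), commonAlignment(), and Align::value(). The main() wrapper and the sample constants are illustrative only; it assumes an LLVM build to compile against.

    // align_sketch.cpp -- illustrative use of the llvm::Align utilities.
    #include "llvm/Support/Alignment.h"
    #include <cassert>
    #include <cstdint>

    using namespace llvm;

    int main() {
      // MaybeAlign(0).valueOrOne() maps a raw section alignment of 0 to Align(1),
      // which is why the old "Alignment = std::max(1u, Alignment)" fixup is dropped.
      Align A = MaybeAlign(0).valueOrOne();
      assert(A.value() == 1);

      // alignTo() replaces the manual "(X + Alignment - 1) & -Alignment" rounding
      // used for stub addresses.
      Align StubAlign(8);
      uint64_t StubAddress = alignTo(uint64_t(0x1003), StubAlign);
      assert(StubAddress == 0x1008 && isAligned(StubAlign, StubAddress));

      // commonAlignment(A, Offset) is the largest alignment both A and Offset
      // satisfy, replacing "(DataSize | Alignment) & -(DataSize | Alignment)".
      assert(commonAlignment(Align(16), 40) == Align(8));
      return 0;
    }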