Index: lib/ReaderWriter/MachO/ArchHandler.h =================================================================== --- lib/ReaderWriter/MachO/ArchHandler.h +++ lib/ReaderWriter/MachO/ArchHandler.h @@ -120,7 +120,7 @@ getReferenceInfo(const normalized::Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, @@ -136,7 +136,7 @@ const normalized::Relocation &reloc2, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, @@ -213,7 +213,7 @@ } /// Does a given unwind-cfi atom represent a CIE (as opposed to an FDE). - static bool isDwarfCIE(bool swap, const DefinedAtom *atom); + static bool isDwarfCIE(bool isBig, const DefinedAtom *atom); struct ReferenceInfo { Reference::KindArch arch; @@ -290,10 +290,10 @@ RelocPattern pattern); - static int16_t readS16(bool swap, const uint8_t *addr); - static int32_t readS32(bool swap, const uint8_t *addr); - static uint32_t readU32(bool swap, const uint8_t *addr); - static int64_t readS64(bool swap, const uint8_t *addr); + static int16_t readS16(const uint8_t *addr, bool isBig); + static int32_t readS32(const uint8_t *addr, bool isBig); + static uint32_t readU32(const uint8_t *addr, bool isBig); + static int64_t readS64(const uint8_t *addr, bool isBig); }; } // namespace mach_o Index: lib/ReaderWriter/MachO/ArchHandler.cpp =================================================================== --- lib/ReaderWriter/MachO/ArchHandler.cpp +++ lib/ReaderWriter/MachO/ArchHandler.cpp @@ -126,31 +126,31 @@ } -int16_t ArchHandler::readS16(bool swap, const uint8_t *addr) { - return read16(swap, *reinterpret_cast<const uint16_t *>(addr)); +int16_t ArchHandler::readS16(const uint8_t 
*addr, bool isBig) { + return read16(addr, isBig); } -int32_t ArchHandler::readS32(bool swap, const uint8_t *addr) { - return read32(swap, *reinterpret_cast<const uint32_t *>(addr)); +int32_t ArchHandler::readS32(const uint8_t *addr, bool isBig) { + return read32(addr, isBig); } -uint32_t ArchHandler::readU32(bool swap, const uint8_t *addr) { - return read32(swap, *reinterpret_cast<const uint32_t *>(addr)); +uint32_t ArchHandler::readU32(const uint8_t *addr, bool isBig) { + return read32(addr, isBig); } -int64_t ArchHandler::readS64(bool swap, const uint8_t *addr) { - return read64(swap, *reinterpret_cast<const uint64_t *>(addr)); +int64_t ArchHandler::readS64(const uint8_t *addr, bool isBig) { + return read64(addr, isBig); } -bool ArchHandler::isDwarfCIE(bool swap, const DefinedAtom *atom) { +bool ArchHandler::isDwarfCIE(bool isBig, const DefinedAtom *atom) { assert(atom->contentType() == DefinedAtom::typeCFI); - uint32_t size = read32(swap, *(uint32_t *)atom->rawContent().data()); + uint32_t size = read32(atom->rawContent().data(), isBig); uint32_t idOffset = sizeof(uint32_t); if (size == 0xffffffffU) idOffset += sizeof(uint64_t); - return read32(swap, *(uint32_t *)(atom->rawContent().data() + idOffset)) == 0; + return read32(atom->rawContent().data() + idOffset, isBig) == 0; } const Atom *ArchHandler::fdeTargetFunction(const DefinedAtom *fde) { Index: lib/ReaderWriter/MachO/ArchHandler_arm.cpp =================================================================== --- lib/ReaderWriter/MachO/ArchHandler_arm.cpp +++ lib/ReaderWriter/MachO/ArchHandler_arm.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" using namespace llvm::MachO; @@ -23,6 +24,10 @@ namespace lld { namespace mach_o { +using llvm::support::ulittle32_t; +using llvm::support::little32_t; + + class ArchHandler_arm : public ArchHandler { public: ArchHandler_arm(); @@ -193,16 +198,13 @@ uint64_t targetAddress, uint64_t inAtomAddress, bool &thumbMode, 
bool targetIsThumb); - - const bool _swap; }; //===----------------------------------------------------------------------===// // ArchHandler_arm //===----------------------------------------------------------------------===// -ArchHandler_arm::ArchHandler_arm() : - _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_armv7)) {} +ArchHandler_arm::ArchHandler_arm() { } ArchHandler_arm::~ArchHandler_arm() { } @@ -513,14 +515,14 @@ std::error_code ArchHandler_arm::getReferenceInfo( const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, const lld::Atom **target, Reference::Addend *addend) { typedef std::error_code E; const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom]; uint64_t targetAddress; - uint32_t instruction = readU32(swap, fixupContent); + uint32_t instruction = *(ulittle32_t *)fixupContent; int32_t displacement; switch (relocPattern(reloc)) { case ARM_THUMB_RELOC_BR22 | rPcRel | rExtern | rLength4: @@ -628,7 +630,7 @@ const normalized::Relocation &reloc2, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddr, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, @@ -779,7 +781,7 @@ } const uint8_t *fixupContent = &inAtom->rawContent()[offsetInAtom]; std::error_code ec; - uint32_t instruction = readU32(swap, fixupContent); + uint32_t instruction = *(ulittle32_t *)fixupContent; uint32_t value; uint32_t fromAddress; uint32_t toAddress; @@ -899,7 +901,7 @@ return std::error_code(); } -void ArchHandler_arm::applyFixupFinal(const Reference &ref, uint8_t *location, +void ArchHandler_arm::applyFixupFinal(const Reference &ref, uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress, 
@@ -907,7 +909,7 @@ if (ref.kindNamespace() != Reference::KindNamespace::mach_o) return; assert(ref.kindArch() == Reference::KindArch::ARM); - int32_t *loc32 = reinterpret_cast<int32_t *>(location); + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); int32_t displacement; uint16_t value16; uint32_t value32; @@ -924,76 +926,76 @@ case thumb_bl22: assert(thumbMode); displacement = (targetAddress - (fixupAddress + 4)) + ref.addend(); - value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, displacement, - targetIsThumb); - write32(*loc32, _swap, value32); + value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, + displacement, targetIsThumb); + *loc32 = value32; break; case thumb_movw: assert(thumbMode); value16 = (targetAddress + ref.addend()) & 0xFFFF; if (targetIsThumb) value16 |= 1; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movt: assert(thumbMode); value16 = (targetAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movw_funcRel: assert(thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF; if (targetIsThumb) value16 |= 1; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movt_funcRel: assert(thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case arm_b24: case arm_bl24: assert(!thumbMode); displacement = (targetAddress - (fixupAddress + 8)) + ref.addend(); value32 = setDisplacementInArmBranch(*loc32, displacement, targetIsThumb); - write32(*loc32, _swap, value32); + *loc32 = value32; break; case arm_movw: assert(!thumbMode); value16 = (targetAddress + ref.addend()) & 0xFFFF; if (targetIsThumb) value16 |= 1; - write32(*loc32, 
_swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movt: assert(!thumbMode); value16 = (targetAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movw_funcRel: assert(!thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF; if (targetIsThumb) value16 |= 1; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movt_funcRel: assert(!thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case pointer32: if (targetIsThumb) - write32(*loc32, _swap, targetAddress + ref.addend() + 1); + *loc32 = targetAddress + ref.addend() + 1; else - write32(*loc32, _swap, targetAddress + ref.addend()); + *loc32 = targetAddress + ref.addend(); break; case delta32: if (targetIsThumb) - write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend() + 1); + *loc32 = targetAddress - fixupAddress + ref.addend() + 1; else - write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend()); + *loc32 = targetAddress - fixupAddress + ref.addend(); break; case lazyPointer: case lazyImmediateLocation: @@ -1059,15 +1061,14 @@ return false; } -void ArchHandler_arm::applyFixupRelocatable(const Reference &ref, - uint8_t *location, - uint64_t fixupAddress, - uint64_t targetAddress, - uint64_t inAtomAddress, - bool &thumbMode, - bool targetIsThumb) { +void ArchHandler_arm::applyFixupRelocatable(const Reference &ref, uint8_t *loc, + uint64_t fixupAddress, + uint64_t targetAddress, + uint64_t inAtomAddress, + bool &thumbMode, + bool targetIsThumb) { bool useExternalReloc = useExternalRelocationTo(*ref.target()); - int32_t *loc32 = reinterpret_cast<int32_t *>(location); + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); int32_t 
displacement; uint16_t value16; uint32_t value32; @@ -1087,9 +1088,9 @@ displacement = (ref.addend() - (fixupAddress + 4)); else displacement = (targetAddress - (fixupAddress + 4)) + ref.addend(); - value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, displacement, - targetIsThumb); - write32(*loc32, _swap, value32); + value32 = setDisplacementInThumbBranch(*loc32, fixupAddress, + displacement, targetIsThumb); + *loc32 = value32; break; case thumb_movw: assert(thumbMode); @@ -1097,7 +1098,7 @@ value16 = ref.addend() & 0xFFFF; else value16 = (targetAddress + ref.addend()) & 0xFFFF; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movt: assert(thumbMode); @@ -1105,17 +1106,17 @@ value16 = ref.addend() >> 16; else value16 = (targetAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movw_funcRel: assert(thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case thumb_movt_funcRel: assert(thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromThumbMov(*loc32, value16)); + *loc32 = setWordFromThumbMov(*loc32, value16); break; case arm_b24: case arm_bl24: @@ -1124,8 +1125,9 @@ displacement = (ref.addend() - (fixupAddress + 8)); else displacement = (targetAddress - (fixupAddress + 8)) + ref.addend(); - value32 = setDisplacementInArmBranch(*loc32, displacement, targetIsThumb); - write32(*loc32, _swap, value32); + value32 = setDisplacementInArmBranch(*loc32, displacement, + targetIsThumb); + *loc32 = value32; break; case arm_movw: assert(!thumbMode); @@ -1133,7 +1135,7 @@ value16 = ref.addend() & 0xFFFF; else value16 = (targetAddress + ref.addend()) & 0xFFFF; - write32(*loc32, 
_swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movt: assert(!thumbMode); @@ -1141,23 +1143,23 @@ value16 = ref.addend() >> 16; else value16 = (targetAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movw_funcRel: assert(!thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) & 0xFFFF; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case arm_movt_funcRel: assert(!thumbMode); value16 = (targetAddress - inAtomAddress + ref.addend()) >> 16; - write32(*loc32, _swap, setWordFromArmMov(*loc32, value16)); + *loc32 = setWordFromArmMov(*loc32, value16); break; case pointer32: - write32(*loc32, _swap, targetAddress + ref.addend()); + *loc32 = targetAddress + ref.addend(); break; case delta32: - write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend()); + *loc32 = targetAddress - fixupAddress + ref.addend(); break; case lazyPointer: case lazyImmediateLocation: Index: lib/ReaderWriter/MachO/ArchHandler_arm64.cpp =================================================================== --- lib/ReaderWriter/MachO/ArchHandler_arm64.cpp +++ lib/ReaderWriter/MachO/ArchHandler_arm64.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Format.h" @@ -24,6 +25,12 @@ namespace lld { namespace mach_o { +using llvm::support::ulittle32_t; +using llvm::support::ulittle64_t; + +using llvm::support::little32_t; +using llvm::support::little64_t; + class ArchHandler_arm64 : public ArchHandler { public: ArchHandler_arm64(); @@ -111,7 +118,7 @@ std::error_code getReferenceInfo(const normalized::Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, 
bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, @@ -122,7 +129,7 @@ const normalized::Relocation &reloc2, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, @@ -192,13 +199,9 @@ static uint32_t setDisplacementInADRP(uint32_t instr, int64_t disp); static Arm64_Kinds offset12KindFromInstruction(uint32_t instr); static uint32_t setImm12(uint32_t instr, uint32_t offset); - - const bool _swap; }; -ArchHandler_arm64::ArchHandler_arm64() - : _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_arm64)) { -} +ArchHandler_arm64::ArchHandler_arm64() {} ArchHandler_arm64::~ArchHandler_arm64() {} @@ -336,7 +339,7 @@ std::error_code ArchHandler_arm64::getReferenceInfo( const Relocation &reloc, const DefinedAtom *inAtom, uint32_t offsetInAtom, - uint64_t fixupAddress, bool swap, + uint64_t fixupAddress, bool isBig, FindAtomBySectionAndAddress atomFromAddress, FindAtomBySymbolIndex atomFromSymbolIndex, Reference::KindValue *kind, const lld::Atom **target, Reference::Addend *addend) { @@ -358,7 +361,7 @@ return std::error_code(); case ARM64_RELOC_PAGEOFF12 | rExtern | rLength4: // ex: ldr x0, [x1, _foo@PAGEOFF] - *kind = offset12KindFromInstruction(readS32(swap, fixupContent)); + *kind = offset12KindFromInstruction(*(little32_t *)fixupContent); if (auto ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; *addend = 0; @@ -396,7 +399,7 @@ *kind = pointer64; if (auto ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS64(swap, fixupContent); + *addend = *(little64_t *)fixupContent; return std::error_code(); case ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8: // ex: .quad _foo@GOT @@ -456,7 +459,7 @@ *kind = delta64; if (auto ec = atomFromSymbolIndex(reloc2.symbol, target)) 
return ec; - *addend = readS64(swap, fixupContent) + offsetInAtom; + *addend = *(little64_t *)fixupContent + offsetInAtom; return std::error_code(); case ((ARM64_RELOC_SUBTRACTOR | rExtern | rLength4) << 16 | ARM64_RELOC_UNSIGNED | rExtern | rLength4): @@ -464,7 +467,7 @@ *kind = delta32; if (auto ec = atomFromSymbolIndex(reloc2.symbol, target)) return ec; - *addend = readS32(swap, fixupContent) + offsetInAtom; + *addend = *(little32_t *)fixupContent + offsetInAtom; return std::error_code(); default: return make_dynamic_error_code(Twine("unsupported arm64 relocation pair")); @@ -496,86 +499,79 @@ } } -void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *location, +void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress) { if (ref.kindNamespace() != Reference::KindNamespace::mach_o) return; assert(ref.kindArch() == Reference::KindArch::AArch64); - int32_t *loc32 = reinterpret_cast<int32_t *>(location); - uint64_t *loc64 = reinterpret_cast<uint64_t *>(location); + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); + ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc); int32_t displacement; uint32_t instruction; uint32_t value32; switch (static_cast<Arm64_Kinds>(ref.kindValue())) { case branch26: displacement = (targetAddress - fixupAddress) + ref.addend(); - value32 = setDisplacementInBranch26(*loc32, displacement); - write32(*loc32, _swap, value32); + *loc32 = setDisplacementInBranch26(*loc32, displacement); return; case page21: case gotPage21: case tlvPage21: displacement = ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096)); - value32 = setDisplacementInADRP(*loc32, displacement); - write32(*loc32, _swap, value32); + *loc32 = setDisplacementInADRP(*loc32, displacement); return; case offset12: case gotOffset12: case tlvOffset12: displacement = (targetAddress + ref.addend()) & 0x00000FFF; - value32 = setImm12(*loc32, displacement); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, 
displacement); return; case offset12scale2: displacement = (targetAddress + ref.addend()) & 0x00000FFF; assert(((displacement & 0x1) == 0) && "scaled imm12 not accessing 2-byte aligneds"); - value32 = setImm12(*loc32, displacement >> 1); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, displacement >> 1); return; case offset12scale4: displacement = (targetAddress + ref.addend()) & 0x00000FFF; assert(((displacement & 0x3) == 0) && "scaled imm12 not accessing 4-byte aligned"); - value32 = setImm12(*loc32, displacement >> 2); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, displacement >> 2); return; case offset12scale8: displacement = (targetAddress + ref.addend()) & 0x00000FFF; assert(((displacement & 0x7) == 0) && "scaled imm12 not accessing 8-byte aligned"); - value32 = setImm12(*loc32, displacement >> 3); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, displacement >> 3); return; case offset12scale16: displacement = (targetAddress + ref.addend()) & 0x00000FFF; assert(((displacement & 0xF) == 0) && "scaled imm12 not accessing 16-byte aligned"); - value32 = setImm12(*loc32, displacement >> 4); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, displacement >> 4); return; case addOffset12: - instruction = read32(_swap, *loc32); + instruction = *loc32; assert(((instruction & 0xFFC00000) == 0xF9400000) && "GOT reloc is not an LDR instruction"); displacement = (targetAddress + ref.addend()) & 0x00000FFF; value32 = 0x91000000 | (instruction & 0x000003FF); instruction = setImm12(value32, displacement); - write32(*loc32, _swap, instruction); + *loc32 = instruction; return; case pointer64: case pointer64ToGOT: - write64(*loc64, _swap, targetAddress + ref.addend()); + *loc64 = targetAddress + ref.addend(); return; case delta64: - write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend()); + *loc64 = (targetAddress - fixupAddress) + ref.addend(); return; case delta32: case delta32ToGOT: - write32(*loc32, 
_swap, (targetAddress - fixupAddress) + ref.addend()); + *loc32 = (targetAddress - fixupAddress) + ref.addend(); return; case lazyPointer: case lazyImmediateLocation: @@ -589,26 +585,23 @@ } void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref, - uint8_t *location, + uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress) { if (ref.kindNamespace() != Reference::KindNamespace::mach_o) return; assert(ref.kindArch() == Reference::KindArch::AArch64); - int32_t *loc32 = reinterpret_cast<int32_t *>(location); - uint64_t *loc64 = reinterpret_cast<uint64_t *>(location); - uint32_t value32; + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); + ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc); switch (static_cast<Arm64_Kinds>(ref.kindValue())) { case branch26: - value32 = setDisplacementInBranch26(*loc32, 0); - write32(*loc32, _swap, value32); + *loc32 = setDisplacementInBranch26(*loc32, 0); return; case page21: case gotPage21: case tlvPage21: - value32 = setDisplacementInADRP(*loc32, 0); - write32(*loc32, _swap, value32); + *loc32 = setDisplacementInADRP(*loc32, 0); return; case offset12: case offset12scale2: @@ -617,23 +610,22 @@ case offset12scale16: case gotOffset12: case tlvOffset12: - value32 = setImm12(*loc32, 0); - write32(*loc32, _swap, value32); + *loc32 = setImm12(*loc32, 0); return; case pointer64: - write64(*loc64, _swap, ref.addend()); + *loc64 = ref.addend(); return; case delta64: - write64(*loc64, _swap, ref.addend() + inAtomAddress - fixupAddress); + *loc64 = ref.addend() + inAtomAddress - fixupAddress; return; case delta32: - write32(*loc32, _swap, ref.addend() + inAtomAddress - fixupAddress); + *loc32 = ref.addend() + inAtomAddress - fixupAddress; return; case pointer64ToGOT: - write64(*loc64, _swap, 0); + *loc64 = 0; return; case delta32ToGOT: - write32(*loc32, _swap, -fixupAddress); + *loc32 = -fixupAddress; return; case addOffset12: llvm_unreachable("lazy reference kind implies GOT pass was run"); Index: lib/ReaderWriter/MachO/ArchHandler_x86.cpp 
=================================================================== --- lib/ReaderWriter/MachO/ArchHandler_x86.cpp +++ lib/ReaderWriter/MachO/ArchHandler_x86.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" using namespace llvm::MachO; @@ -23,6 +24,12 @@ namespace lld { namespace mach_o { +using llvm::support::ulittle16_t; +using llvm::support::ulittle32_t; + +using llvm::support::little16_t; +using llvm::support::little32_t; + class ArchHandler_x86 : public ArchHandler { public: ArchHandler_x86(); @@ -158,16 +165,13 @@ uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress); - - const bool _swap; }; //===----------------------------------------------------------------------===// // ArchHandler_x86 //===----------------------------------------------------------------------===// -ArchHandler_x86::ArchHandler_x86() : - _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_x86)) {} +ArchHandler_x86::ArchHandler_x86() {} ArchHandler_x86::~ArchHandler_x86() { } @@ -259,18 +263,18 @@ *kind = branch32; if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = fixupAddress + 4 + readS32(swap, fixupContent); + *addend = fixupAddress + 4 + *(little32_t *)fixupContent; break; case GENERIC_RELOC_VANILLA | rPcRel | rLength4: // ex: call _foo (and _foo defined) *kind = branch32; - targetAddress = fixupAddress + 4 + readS32(swap, fixupContent); + targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent; return atomFromAddress(reloc.symbol, targetAddress, target, addend); break; case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength4: // ex: call _foo+n (and _foo defined) *kind = branch32; - targetAddress = fixupAddress + 4 + readS32(swap, fixupContent); + targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent; if (E ec = atomFromAddress(0, reloc.value, target, addend)) return ec; *addend = targetAddress - 
reloc.value; @@ -280,18 +284,18 @@ *kind = branch16; if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = fixupAddress + 2 + readS16(swap, fixupContent); + *addend = fixupAddress + 2 + *(little16_t *)fixupContent; break; case GENERIC_RELOC_VANILLA | rPcRel | rLength2: // ex: callw _foo (and _foo defined) *kind = branch16; - targetAddress = fixupAddress + 2 + readS16(swap, fixupContent); + targetAddress = fixupAddress + 2 + *(little16_t *)fixupContent; return atomFromAddress(reloc.symbol, targetAddress, target, addend); break; case GENERIC_RELOC_VANILLA | rScattered | rPcRel | rLength2: // ex: callw _foo+n (and _foo defined) *kind = branch16; - targetAddress = fixupAddress + 2 + readS16(swap, fixupContent); + targetAddress = fixupAddress + 2 + *(little16_t *)fixupContent; if (E ec = atomFromAddress(0, reloc.value, target, addend)) return ec; *addend = targetAddress - reloc.value; @@ -305,7 +309,7 @@ : pointer32; if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readU32(swap, fixupContent); + *addend = *(ulittle32_t *)fixupContent; break; case GENERIC_RELOC_VANILLA | rLength4: // ex: movl _foo, %eax (and _foo defined) @@ -314,7 +318,7 @@ *kind = ((perms & DefinedAtom::permR_X) == DefinedAtom::permR_X) ? 
abs32 : pointer32; - targetAddress = readU32(swap, fixupContent); + targetAddress = *(ulittle32_t *)fixupContent; return atomFromAddress(reloc.symbol, targetAddress, target, addend); break; case GENERIC_RELOC_VANILLA | rScattered | rLength4: @@ -325,7 +329,7 @@ : pointer32; if (E ec = atomFromAddress(0, reloc.value, target, addend)) return ec; - *addend = readU32(swap, fixupContent) - reloc.value; + *addend = *(ulittle32_t *)fixupContent - reloc.value; break; default: return make_dynamic_error_code(Twine("unsupported i386 relocation type")); @@ -360,7 +364,7 @@ GENERIC_RELOC_PAIR | rScattered | rLength4): toAddress = reloc1.value; fromAddress = reloc2.value; - value = readS32(swap, fixupContent); + value = *(little32_t *)fixupContent; ec = atomFromAddr(0, toAddress, target, &offsetInTo); if (ec) return ec; @@ -426,34 +430,34 @@ } } -void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *location, +void ArchHandler_x86::applyFixupFinal(const Reference &ref, uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress) { if (ref.kindNamespace() != Reference::KindNamespace::mach_o) return; assert(ref.kindArch() == Reference::KindArch::x86); - int32_t *loc32 = reinterpret_cast<int32_t *>(location); - int16_t *loc16 = reinterpret_cast<int16_t *>(location); + ulittle16_t *loc16 = reinterpret_cast<ulittle16_t *>(loc); + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); switch (ref.kindValue()) { case branch32: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend()); + *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend(); break; case branch16: - write16(*loc16, _swap, (targetAddress - (fixupAddress + 2)) + ref.addend()); + *loc16 = (targetAddress - (fixupAddress + 2)) + ref.addend(); break; case pointer32: case abs32: - write32(*loc32, _swap, targetAddress + ref.addend()); + *loc32 = targetAddress + ref.addend(); break; case funcRel32: - write32(*loc32, _swap, targetAddress - inAtomAddress + ref.addend()); + *loc32 = targetAddress - inAtomAddress + ref.addend(); break; case delta32: - 
write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend()); + *loc32 = targetAddress - fixupAddress + ref.addend(); break; case negDelta32: - write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend()); + *loc32 = fixupAddress - targetAddress + ref.addend(); break; case modeCode: case modeData: @@ -468,38 +471,38 @@ } void ArchHandler_x86::applyFixupRelocatable(const Reference &ref, - uint8_t *location, + uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress) { - int32_t *loc32 = reinterpret_cast<int32_t *>(location); - int16_t *loc16 = reinterpret_cast<int16_t *>(location); bool useExternalReloc = useExternalRelocationTo(*ref.target()); + ulittle16_t *loc16 = reinterpret_cast<ulittle16_t *>(loc); + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); switch (ref.kindValue()) { case branch32: if (useExternalReloc) - write32(*loc32, _swap, ref.addend() - (fixupAddress + 4)); + *loc32 = ref.addend() - (fixupAddress + 4); else - write32(*loc32, _swap, (targetAddress - (fixupAddress+4)) + ref.addend()); + *loc32 = (targetAddress - (fixupAddress+4)) + ref.addend(); break; case branch16: if (useExternalReloc) - write16(*loc16, _swap, ref.addend() - (fixupAddress + 2)); + *loc16 = ref.addend() - (fixupAddress + 2); else - write16(*loc16, _swap, (targetAddress - (fixupAddress+2)) + ref.addend()); + *loc16 = (targetAddress - (fixupAddress+2)) + ref.addend(); break; case pointer32: case abs32: - write32(*loc32, _swap, targetAddress + ref.addend()); + *loc32 = targetAddress + ref.addend(); break; case funcRel32: - write32(*loc32, _swap, targetAddress - inAtomAddress + ref.addend()); // FIXME + *loc32 = targetAddress - inAtomAddress + ref.addend(); // FIXME break; case delta32: - write32(*loc32, _swap, targetAddress - fixupAddress + ref.addend()); + *loc32 = targetAddress - fixupAddress + ref.addend(); break; case negDelta32: - write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend()); + *loc32 = fixupAddress - targetAddress + ref.addend(); break; case modeCode: case 
modeData: Index: lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp =================================================================== --- lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp +++ lib/ReaderWriter/MachO/ArchHandler_x86_64.cpp @@ -15,6 +15,7 @@ #include "llvm/ADT/StringSwitch.h" #include "llvm/ADT/Triple.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" using namespace llvm::MachO; @@ -23,6 +24,12 @@ namespace lld { namespace mach_o { +using llvm::support::ulittle32_t; +using llvm::support::ulittle64_t; + +using llvm::support::little32_t; +using llvm::support::little64_t; + class ArchHandler_x86_64 : public ArchHandler { public: ArchHandler_x86_64(); @@ -202,13 +209,10 @@ uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress); - - const bool _swap; }; -ArchHandler_x86_64::ArchHandler_x86_64() : - _swap(!MachOLinkingContext::isHostEndian(MachOLinkingContext::arch_x86_64)) {} +ArchHandler_x86_64::ArchHandler_x86_64() { } ArchHandler_x86_64::~ArchHandler_x86_64() { } @@ -337,25 +341,25 @@ case ripRel32: if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS32(swap, fixupContent); + *addend = *(little32_t *)fixupContent; return std::error_code(); case ripRel32Minus1: if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS32(swap, fixupContent) + 1; + *addend = *(little32_t *)fixupContent + 1; return std::error_code(); case ripRel32Minus2: if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS32(swap, fixupContent) + 2; + *addend = *(little32_t *)fixupContent + 2; return std::error_code(); case ripRel32Minus4: if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS32(swap, fixupContent) + 4; + *addend = *(little32_t *)fixupContent + 4; return std::error_code(); case ripRel32Anon: - targetAddress = fixupAddress + 4 + readS32(swap, fixupContent); + targetAddress = fixupAddress + 4 + *(little32_t *)fixupContent; return 
atomFromAddress(reloc.symbol, targetAddress, target, addend); case ripRel32GotLoad: case ripRel32Got: @@ -366,10 +370,10 @@ case pointer64: if (E ec = atomFromSymbolIndex(reloc.symbol, target)) return ec; - *addend = readS64(swap, fixupContent); + *addend = *(little64_t *)fixupContent; return std::error_code(); case pointer64Anon: - targetAddress = readS64(swap, fixupContent); + targetAddress = *(little64_t *)fixupContent; return atomFromAddress(reloc.symbol, targetAddress, target, addend); default: llvm_unreachable("bad reloc kind"); @@ -423,18 +427,18 @@ case delta64: if (E ec = atomFromSymbolIndex(reloc2.symbol, target)) return ec; - *addend = readS64(swap, fixupContent) + offsetInAtom; + *addend = *(little64_t *)fixupContent + offsetInAtom; return std::error_code(); case delta32: if (E ec = atomFromSymbolIndex(reloc2.symbol, target)) return ec; - *addend = readS32(swap, fixupContent) + offsetInAtom; + *addend = *(little32_t *)fixupContent + offsetInAtom; return std::error_code(); case delta64Anon: - targetAddress = offsetInAtom + readS64(swap, fixupContent); + targetAddress = offsetInAtom + *(little64_t *)fixupContent; return atomFromAddress(reloc2.symbol, targetAddress, target, addend); case delta32Anon: - targetAddress = offsetInAtom + readS32(swap, fixupContent); + targetAddress = offsetInAtom + *(little32_t *)fixupContent; return atomFromAddress(reloc2.symbol, targetAddress, target, addend); default: llvm_unreachable("bad reloc pair kind"); @@ -469,52 +473,52 @@ } void ArchHandler_x86_64::applyFixupFinal( - const Reference &ref, uint8_t *location, uint64_t fixupAddress, + const Reference &ref, uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress, FindAddressForAtom findSectionAddress) { + ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc); + ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc); if (ref.kindNamespace() != Reference::KindNamespace::mach_o) return; assert(ref.kindArch() == Reference::KindArch::x86_64); - 
int32_t *loc32 = reinterpret_cast(location); - uint64_t *loc64 = reinterpret_cast(location); switch (static_cast(ref.kindValue())) { case branch32: case ripRel32: case ripRel32Anon: case ripRel32Got: case ripRel32GotLoad: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend()); + *loc32 = targetAddress - (fixupAddress + 4) + ref.addend(); return; case pointer64: case pointer64Anon: - write64(*loc64, _swap, targetAddress + ref.addend()); + *loc64 = targetAddress + ref.addend(); return; case ripRel32Minus1: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 5)) + ref.addend()); + *loc32 = targetAddress - (fixupAddress + 5) + ref.addend(); return; case ripRel32Minus2: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 6)) + ref.addend()); + *loc32 = targetAddress - (fixupAddress + 6) + ref.addend(); return; case ripRel32Minus4: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 8)) + ref.addend()); + *loc32 = targetAddress - (fixupAddress + 8) + ref.addend(); return; case delta32: case delta32Anon: - write32(*loc32, _swap, (targetAddress - fixupAddress) + ref.addend()); + *loc32 = targetAddress - fixupAddress + ref.addend(); return; case delta64: case delta64Anon: case unwindFDEToFunction: - write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend()); + *loc64 = targetAddress - fixupAddress + ref.addend(); return; case ripRel32GotLoadNowLea: // Change MOVQ to LEA - assert(location[-2] == 0x8B); - location[-2] = 0x8D; - write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend()); + assert(loc[-2] == 0x8B); + loc[-2] = 0x8D; + *loc32 = targetAddress - (fixupAddress + 4) + ref.addend(); return; case negDelta32: - write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend()); + *loc32 = fixupAddress - targetAddress + ref.addend(); return; case lazyPointer: case lazyImmediateLocation: @@ -522,13 +526,12 @@ return; case imageOffset: case imageOffsetGot: - write32(*loc32, _swap, (targetAddress - 
imageBaseAddress) + ref.addend()); + *loc32 = (targetAddress - imageBaseAddress) + ref.addend(); return; case unwindInfoToEhFrame: { uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend(); assert(val < 0xffffffU && "offset in __eh_frame too large"); - uint32_t encoding = read32(_swap, *loc32) & 0xff000000U; - write32(*loc32, _swap, encoding | val); + *loc32 = (*loc32 & 0xff000000U) | val; return; } case invalid: @@ -540,51 +543,51 @@ void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref, - uint8_t *location, + uint8_t *loc, uint64_t fixupAddress, uint64_t targetAddress, uint64_t inAtomAddress) { - int32_t *loc32 = reinterpret_cast(location); - uint64_t *loc64 = reinterpret_cast(location); + ulittle32_t *loc32 = reinterpret_cast(loc); + ulittle64_t *loc64 = reinterpret_cast(loc); switch (static_cast(ref.kindValue())) { case branch32: case ripRel32: case ripRel32Got: case ripRel32GotLoad: - write32(*loc32, _swap, ref.addend()); + *loc32 = ref.addend(); return; case ripRel32Anon: - write32(*loc32, _swap, (targetAddress - (fixupAddress + 4)) + ref.addend()); + *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend(); return; case pointer64: - write64(*loc64, _swap, ref.addend()); + *loc64 = ref.addend(); return; case pointer64Anon: - write64(*loc64, _swap, targetAddress + ref.addend()); + *loc64 = targetAddress + ref.addend(); return; case ripRel32Minus1: - write32(*loc32, _swap, ref.addend() - 1); + *loc32 = ref.addend() - 1; return; case ripRel32Minus2: - write32(*loc32, _swap, ref.addend() - 2); + *loc32 = ref.addend() - 2; return; case ripRel32Minus4: - write32(*loc32, _swap, ref.addend() - 4); + *loc32 = ref.addend() - 4; return; case delta32: - write32(*loc32, _swap, ref.addend() + inAtomAddress - fixupAddress); + *loc32 = ref.addend() + inAtomAddress - fixupAddress; return; case delta32Anon: - write32(*loc32, _swap, (targetAddress - fixupAddress) + ref.addend()); + *loc32 = (targetAddress - fixupAddress) + ref.addend(); 
return; case delta64: - write64(*loc64, _swap, ref.addend() + inAtomAddress - fixupAddress); + *loc64 = ref.addend() + inAtomAddress - fixupAddress; return; case delta64Anon: - write64(*loc64, _swap, (targetAddress - fixupAddress) + ref.addend()); + *loc64 = (targetAddress - fixupAddress) + ref.addend(); return; case negDelta32: - write32(*loc32, _swap, fixupAddress - targetAddress + ref.addend()); + *loc32 = fixupAddress - targetAddress + ref.addend(); return; case ripRel32GotLoadNowLea: llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run"); Index: lib/ReaderWriter/MachO/CompactUnwindPass.cpp =================================================================== --- lib/ReaderWriter/MachO/CompactUnwindPass.cpp +++ lib/ReaderWriter/MachO/CompactUnwindPass.cpp @@ -68,7 +68,7 @@ class UnwindInfoAtom : public SimpleDefinedAtom { public: - UnwindInfoAtom(ArchHandler &archHandler, const File &file, bool swap, + UnwindInfoAtom(ArchHandler &archHandler, const File &file, bool isBig, std::vector commonEncodings, std::vector personalities, std::vector pages, uint32_t numLSDAs) @@ -81,7 +81,7 @@ _lsdaIndexOffset(_topLevelIndexOffset + 3 * (pages.size() + 1) * sizeof(uint32_t)), _firstPageOffset(_lsdaIndexOffset + 2 * numLSDAs * sizeof(uint32_t)), - _swap(swap) { + _isBig(isBig) { addHeader(commonEncodings.size(), personalities.size(), pages.size()); addCommonEncodings(commonEncodings); @@ -112,21 +112,22 @@ uint32_t headerSize = 7 * sizeof(uint32_t); _contents.resize(headerSize); - int32_t *headerEntries = (int32_t *)_contents.data(); + uint8_t *headerEntries = _contents.data(); // version - write32(headerEntries[0], _swap, 1); + write32(headerEntries, 1, _isBig); // commonEncodingsArraySectionOffset - write32(headerEntries[1], _swap, _commonEncodingsOffset); + write32(headerEntries + sizeof(uint32_t), _commonEncodingsOffset, _isBig); // commonEncodingsArrayCount - write32(headerEntries[2], _swap, numCommon); + write32(headerEntries + 2 * sizeof(uint32_t), 
numCommon, _isBig); // personalityArraySectionOffset - write32(headerEntries[3], _swap, _personalityArrayOffset); + write32(headerEntries + 3 * sizeof(uint32_t), _personalityArrayOffset, + _isBig); // personalityArrayCount - write32(headerEntries[4], _swap, numPersonalities); + write32(headerEntries + 4 * sizeof(uint32_t), numPersonalities, _isBig); // indexSectionOffset - write32(headerEntries[5], _swap, _topLevelIndexOffset); + write32(headerEntries + 5 * sizeof(uint32_t), _topLevelIndexOffset, _isBig); // indexCount - write32(headerEntries[6], _swap, numPages + 1); + write32(headerEntries + 6 * sizeof(uint32_t), numPages + 1, _isBig); } /// Add the list of common encodings to the section; this is simply an array @@ -136,11 +137,12 @@ _contents.resize(_commonEncodingsOffset + commonEncodings.size() * sizeof(uint32_t)); - int32_t *commonEncodingsArea = - (int32_t *)&_contents[_commonEncodingsOffset]; + uint8_t *commonEncodingsArea = &_contents[_commonEncodingsOffset]; - for (uint32_t encoding : commonEncodings) - write32(*commonEncodingsArea++, _swap, encoding); + for (uint32_t encoding : commonEncodings) { + write32(commonEncodingsArea, encoding, _isBig); + commonEncodingsArea += sizeof(uint32_t); + } } void addPersonalityFunctions(std::vector personalities) { @@ -162,16 +164,16 @@ // The most difficult job here is calculating the LSDAs; everything else // follows fairly naturally, but we can't state where the first - int32_t *indexData = (int32_t *)&_contents[_topLevelIndexOffset]; + uint8_t *indexData = &_contents[_topLevelIndexOffset]; uint32_t numLSDAs = 0; for (unsigned i = 0; i < pages.size(); ++i) { // functionOffset addImageReference(_topLevelIndexOffset + 3 * i * sizeof(uint32_t), pages[i].entries[0].rangeStart); // secondLevelPagesSectionOffset - write32(indexData[3 * i + 1], _swap, pageLoc); - write32(indexData[3 * i + 2], _swap, - _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t)); + write32(indexData + (3 * i + 1) * sizeof(uint32_t), pageLoc, 
_isBig); + write32(indexData + (3 * i + 2) * sizeof(uint32_t), + _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t), _isBig); for (auto &entry : pages[i].entries) if (entry.lsdaLocation) @@ -184,8 +186,8 @@ 3 * pages.size() * sizeof(uint32_t), finalEntry.rangeStart, finalEntry.rangeLength); // secondLevelPagesSectionOffset => 0 - indexData[3 * pages.size() + 2] = - _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t); + write32(indexData + (3 * pages.size() + 2) * sizeof(uint32_t), + _lsdaIndexOffset + numLSDAs * 2 * sizeof(uint32_t), _isBig); } void addLSDAIndexes(std::vector &pages, uint32_t numLSDAs) { @@ -220,18 +222,17 @@ using normalized::write32; using normalized::write16; // 2 => regular page - write32(*(int32_t *)&_contents[curPageOffset], _swap, 2); + write32(&_contents[curPageOffset], 2, _isBig); // offset of 1st entry - write16(*(int16_t *)&_contents[curPageOffset + 4], _swap, headerSize); - write16(*(int16_t *)&_contents[curPageOffset + 6], _swap, - page.entries.size()); + write16(&_contents[curPageOffset + 4], headerSize, _isBig); + write16(&_contents[curPageOffset + 6], page.entries.size(), _isBig); uint32_t pagePos = curPageOffset + headerSize; for (auto &entry : page.entries) { addImageReference(pagePos, entry.rangeStart); - write32(reinterpret_cast(_contents.data() + pagePos)[1], _swap, - entry.encoding); + write32(_contents.data() + pagePos + sizeof(uint32_t), entry.encoding, + _isBig); if ((entry.encoding & 0x0f000000U) == _archHandler.dwarfCompactUnwindType()) addEhFrameReference(pagePos + sizeof(uint32_t), entry.ehFrame); @@ -265,7 +266,7 @@ uint32_t _topLevelIndexOffset; uint32_t _lsdaIndexOffset; uint32_t _firstPageOffset; - bool _swap; + bool _isBig; }; /// Pass for instantiating and optimizing GOT slots. 
@@ -275,7 +276,7 @@ CompactUnwindPass(const MachOLinkingContext &context) : _context(context), _archHandler(_context.archHandler()), _file(""), - _swap(!MachOLinkingContext::isHostEndian(_context.arch())) {} + _isBig(MachOLinkingContext::isBigEndian(_context.arch())) {} private: void perform(std::unique_ptr &mergedFile) override { @@ -335,7 +336,7 @@ // FIXME: we should also erase all compact-unwind atoms; their job is done. UnwindInfoAtom *unwind = new (_file.allocator()) - UnwindInfoAtom(_archHandler, _file, _swap, std::vector(), + UnwindInfoAtom(_archHandler, _file, _isBig, std::vector(), personalities, pages, numLSDAs); mergedFile->addAtom(*unwind); } @@ -402,8 +403,9 @@ using normalized::read32; entry.rangeLength = - read32(_swap, ((uint32_t *)atom->rawContent().data())[2]); - entry.encoding = read32(_swap, ((uint32_t *)atom->rawContent().data())[3]); + read32(atom->rawContent().data() + 2 * sizeof(uint32_t), _isBig); + entry.encoding = + read32(atom->rawContent().data() + 3 * sizeof(uint32_t), _isBig); return entry; } @@ -413,7 +415,7 @@ for (const DefinedAtom *ehFrameAtom : mergedFile->defined()) { if (ehFrameAtom->contentType() != DefinedAtom::typeCFI) continue; - if (ArchHandler::isDwarfCIE(_swap, ehFrameAtom)) + if (ArchHandler::isDwarfCIE(_isBig, ehFrameAtom)) continue; if (const Atom *function = _archHandler.fdeTargetFunction(ehFrameAtom)) @@ -504,7 +506,7 @@ const MachOLinkingContext &_context; mach_o::ArchHandler &_archHandler; MachOFile _file; - bool _swap; + bool _isBig; }; void addCompactUnwindPass(PassManager &pm, const MachOLinkingContext &ctx) { Index: lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp =================================================================== --- lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp +++ lib/ReaderWriter/MachO/MachONormalizedFileBinaryReader.cpp @@ -55,14 +55,14 @@ // Utility to call a lambda expression on each load command. 
static std::error_code forEachLoadCommand( - StringRef lcRange, unsigned lcCount, bool swap, bool is64, + StringRef lcRange, unsigned lcCount, bool isBig, bool is64, std::function func) { const char* p = lcRange.begin(); for (unsigned i=0; i < lcCount; ++i) { const load_command *lc = reinterpret_cast(p); load_command lcCopy; const load_command *slc = lc; - if (swap) { + if (isBig != llvm::sys::IsBigEndianHost) { memcpy(&lcCopy, lc, sizeof(load_command)); swapStruct(lcCopy); slc = &lcCopy; @@ -80,7 +80,7 @@ } static std::error_code appendRelocations(Relocations &relocs, StringRef buffer, - bool swap, bool bigEndian, + bool bigEndian, uint32_t reloff, uint32_t nreloc) { if ((reloff + nreloc*8) > buffer.size()) return make_error_code(llvm::errc::executable_format_error); @@ -88,24 +88,24 @@ reinterpret_cast(buffer.begin()+reloff); for(uint32_t i=0; i < nreloc; ++i) { - relocs.push_back(unpackRelocation(relocsArray[i], swap, bigEndian)); + relocs.push_back(unpackRelocation(relocsArray[i], bigEndian)); } return std::error_code(); } static std::error_code -appendIndirectSymbols(IndirectSymbols &isyms, StringRef buffer, bool swap, - bool bigEndian, uint32_t istOffset, uint32_t istCount, +appendIndirectSymbols(IndirectSymbols &isyms, StringRef buffer, bool isBig, + uint32_t istOffset, uint32_t istCount, uint32_t startIndex, uint32_t count) { if ((istOffset + istCount*4) > buffer.size()) return make_error_code(llvm::errc::executable_format_error); if (startIndex+count > istCount) return make_error_code(llvm::errc::executable_format_error); - const uint32_t *indirectSymbolArray = - reinterpret_cast(buffer.begin()+istOffset); + const uint8_t *indirectSymbolArray = (const uint8_t *)buffer.data(); for(uint32_t i=0; i < count; ++i) { - isyms.push_back(read32(swap, indirectSymbolArray[startIndex+i])); + isyms.push_back(read32( + indirectSymbolArray + (startIndex + i) * sizeof(uint32_t), isBig)); } return std::error_code(); } @@ -118,23 +118,23 @@ } -static bool 
isMachOHeader(const mach_header *mh, bool &is64, bool &swap) { - switch (mh->magic) { +static bool isMachOHeader(const mach_header *mh, bool &is64, bool &isBig) { + switch (read32(&mh->magic, false)) { case llvm::MachO::MH_MAGIC: is64 = false; - swap = false; + isBig = false; return true; case llvm::MachO::MH_MAGIC_64: is64 = true; - swap = false; + isBig = false; return true; case llvm::MachO::MH_CIGAM: is64 = false; - swap = true; + isBig = true; return true; case llvm::MachO::MH_CIGAM_64: is64 = true; - swap = true; + isBig = true; return true; default: return false; @@ -156,17 +156,18 @@ // If file buffer does not start with MH_MAGIC (and variants), not obj file. const mach_header *mh = reinterpret_cast( fileBuffer.begin()); - bool is64, swap; - if (!isMachOHeader(mh, is64, swap)) + bool is64, isBig; + if (!isMachOHeader(mh, is64, isBig)) return false; // If not MH_OBJECT, not object file. - if (read32(swap, mh->filetype) != MH_OBJECT) + if (read32(&mh->filetype, isBig) != MH_OBJECT) return false; // Lookup up arch from cpu/subtype pair. - arch = MachOLinkingContext::archFromCpuType(read32(swap, mh->cputype), - read32(swap, mh->cpusubtype)); + arch = MachOLinkingContext::archFromCpuType( + read32(&mh->cputype, isBig), + read32(&mh->cpusubtype, isBig)); return true; } @@ -219,14 +220,14 @@ } // Determine endianness and pointer size for mach-o file. - bool is64, swap; - if (!isMachOHeader(mh, is64, swap)) + bool is64, isBig; + if (!isMachOHeader(mh, is64, isBig)) return make_error_code(llvm::errc::executable_format_error); // Endian swap header, if needed. 
mach_header headerCopy; const mach_header *smh = mh; - if (swap) { + if (isBig != llvm::sys::IsBigEndianHost) { memcpy(&headerCopy, mh, sizeof(mach_header)); swapStruct(headerCopy); smh = &headerCopy; @@ -249,7 +250,6 @@ + MachOLinkingContext::nameFromArch(f->arch) + ")" )); } - bool isBigEndianArch = MachOLinkingContext::isBigEndian(f->arch); // Copy file type and flags f->fileType = HeaderFileType(smh->filetype); f->flags = smh->flags; @@ -258,13 +258,13 @@ // Pre-scan load commands looking for indirect symbol table. uint32_t indirectSymbolTableOffset = 0; uint32_t indirectSymbolTableCount = 0; - std::error_code ec = forEachLoadCommand(lcRange, lcCount, swap, is64, + std::error_code ec = forEachLoadCommand(lcRange, lcCount, isBig, is64, [&](uint32_t cmd, uint32_t size, const char *lc) -> bool { if (cmd == LC_DYSYMTAB) { const dysymtab_command *d = reinterpret_cast(lc); - indirectSymbolTableOffset = read32(swap, d->indirectsymoff); - indirectSymbolTableCount = read32(swap, d->nindirectsyms); + indirectSymbolTableOffset = read32(&d->indirectsymoff, isBig); + indirectSymbolTableCount = read32(&d->nindirectsyms, isBig); return true; } return false; @@ -276,16 +276,14 @@ const data_in_code_entry *dataInCode = nullptr; const dyld_info_command *dyldInfo = nullptr; uint32_t dataInCodeSize = 0; - ec = forEachLoadCommand(lcRange, lcCount, swap, is64, + ec = forEachLoadCommand(lcRange, lcCount, isBig, is64, [&] (uint32_t cmd, uint32_t size, const char* lc) -> bool { switch(cmd) { case LC_SEGMENT_64: if (is64) { const segment_command_64 *seg = reinterpret_cast(lc); - const unsigned sectionCount = (swap - ? 
llvm::sys::getSwappedBytes(seg->nsects) - : seg->nsects); + const unsigned sectionCount = read32(&seg->nsects, isBig); const section_64 *sects = reinterpret_cast (lc + sizeof(segment_command_64)); const unsigned lcSize = sizeof(segment_command_64) @@ -298,26 +296,27 @@ Section section; section.segmentName = getString16(sect->segname); section.sectionName = getString16(sect->sectname); - section.type = (SectionType)(read32(swap, sect->flags) - & SECTION_TYPE); - section.attributes = read32(swap, sect->flags) & SECTION_ATTRIBUTES; - section.alignment = read32(swap, sect->align); - section.address = read64(swap, sect->addr); + section.type = (SectionType)(read32(§->flags, isBig) & + SECTION_TYPE); + section.attributes = read32(§->flags, isBig) & SECTION_ATTRIBUTES; + section.alignment = read32(§->align, isBig); + section.address = read64(§->addr, isBig); const uint8_t *content = - (uint8_t *)start + read32(swap, sect->offset); - size_t contentSize = read64(swap, sect->size); + (uint8_t *)start + read32(§->offset, isBig); + size_t contentSize = read64(§->size, isBig); // Note: this assign() is copying the content bytes. Ideally, // we can use a custom allocator for vector to avoid the copy. section.content = llvm::makeArrayRef(content, contentSize); - appendRelocations(section.relocations, mb->getBuffer(), - swap, isBigEndianArch, read32(swap, sect->reloff), - read32(swap, sect->nreloc)); + appendRelocations(section.relocations, mb->getBuffer(), isBig, + read32(§->reloff, isBig), + read32(§->nreloc, isBig)); if (section.type == S_NON_LAZY_SYMBOL_POINTERS) { appendIndirectSymbols(section.indirectSymbols, mb->getBuffer(), - swap, isBigEndianArch, + isBig, indirectSymbolTableOffset, indirectSymbolTableCount, - read32(swap, sect->reserved1), contentSize/4); + read32(§->reserved1, isBig), + contentSize/4); } f->sections.push_back(section); } @@ -327,9 +326,7 @@ if (!is64) { const segment_command *seg = reinterpret_cast(lc); - const unsigned sectionCount = (swap - ? 
llvm::sys::getSwappedBytes(seg->nsects) - : seg->nsects); + const unsigned sectionCount = read32(&seg->nsects, isBig); const section *sects = reinterpret_cast (lc + sizeof(segment_command)); const unsigned lcSize = sizeof(segment_command) @@ -342,26 +339,26 @@ Section section; section.segmentName = getString16(sect->segname); section.sectionName = getString16(sect->sectname); - section.type = (SectionType)(read32(swap, sect->flags) - & SECTION_TYPE); - section.attributes = read32(swap, sect->flags) & SECTION_ATTRIBUTES; - section.alignment = read32(swap, sect->align); - section.address = read32(swap, sect->addr); + section.type = (SectionType)(read32(§->flags, isBig) & + SECTION_TYPE); + section.attributes = + read32((uint8_t *)§->flags, isBig) & SECTION_ATTRIBUTES; + section.alignment = read32(§->align, isBig); + section.address = read32(§->addr, isBig); const uint8_t *content = - (uint8_t *)start + read32(swap, sect->offset); - size_t contentSize = read32(swap, sect->size); + (uint8_t *)start + read32(§->offset, isBig); + size_t contentSize = read32(§->size, isBig); // Note: this assign() is copying the content bytes. Ideally, // we can use a custom allocator for vector to avoid the copy. 
section.content = llvm::makeArrayRef(content, contentSize); - appendRelocations(section.relocations, mb->getBuffer(), - swap, isBigEndianArch, read32(swap, sect->reloff), - read32(swap, sect->nreloc)); + appendRelocations(section.relocations, mb->getBuffer(), isBig, + read32(§->reloff, isBig), + read32(§->nreloc, isBig)); if (section.type == S_NON_LAZY_SYMBOL_POINTERS) { - appendIndirectSymbols(section.indirectSymbols, mb->getBuffer(), - swap, isBigEndianArch, - indirectSymbolTableOffset, - indirectSymbolTableCount, - read32(swap, sect->reserved1), contentSize/4); + appendIndirectSymbols( + section.indirectSymbols, mb->getBuffer(), isBig, + indirectSymbolTableOffset, indirectSymbolTableCount, + read32(§->reserved1, isBig), contentSize / 4); } f->sections.push_back(section); } @@ -369,15 +366,16 @@ break; case LC_SYMTAB: { const symtab_command *st = reinterpret_cast(lc); - const char *strings = start + read32(swap, st->stroff); - const uint32_t strSize = read32(swap, st->strsize); + const char *strings = start + read32(&st->stroff, isBig); + const uint32_t strSize = read32(&st->strsize, isBig); // Validate string pool and symbol table all in buffer. 
- if ( read32(swap, st->stroff)+read32(swap, st->strsize) - > objSize ) + if (read32((uint8_t *)&st->stroff, isBig) + + read32((uint8_t *)&st->strsize, isBig) > + objSize) return true; if (is64) { - const uint32_t symOffset = read32(swap, st->symoff); - const uint32_t symCount = read32(swap, st->nsyms); + const uint32_t symOffset = read32(&st->symoff, isBig); + const uint32_t symCount = read32(&st->nsyms, isBig); if ( symOffset+(symCount*sizeof(nlist_64)) > objSize) return true; const nlist_64 *symbols = @@ -386,7 +384,7 @@ for(uint32_t i=0; i < symCount; ++i) { const nlist_64 *sin = &symbols[i]; nlist_64 tempSym; - if (swap) { + if (isBig != llvm::sys::IsBigEndianHost) { tempSym = *sin; swapStruct(tempSym); sin = &tempSym; } Symbol sout; @@ -406,8 +404,8 @@ f->localSymbols.push_back(sout); } } else { - const uint32_t symOffset = read32(swap, st->symoff); - const uint32_t symCount = read32(swap, st->nsyms); + const uint32_t symOffset = read32(&st->symoff, isBig); + const uint32_t symCount = read32(&st->nsyms, isBig); if ( symOffset+(symCount*sizeof(nlist)) > objSize) return true; const nlist *symbols = @@ -416,7 +414,7 @@ for(uint32_t i=0; i < symCount; ++i) { const nlist *sin = &symbols[i]; nlist tempSym; - if (swap) { + if (isBig != llvm::sys::IsBigEndianHost) { tempSym = *sin; swapStruct(tempSym); sin = &tempSym; } Symbol sout; @@ -440,15 +438,15 @@ break; case LC_ID_DYLIB: { const dylib_command *dl = reinterpret_cast(lc); - f->installName = lc + read32(swap, dl->dylib.name); + f->installName = lc + read32(&dl->dylib.name, isBig); } break; case LC_DATA_IN_CODE: { const linkedit_data_command *ldc = reinterpret_cast(lc); - dataInCode = reinterpret_cast( - start + read32(swap, ldc->dataoff)); - dataInCodeSize = read32(swap, ldc->datasize); + dataInCode = reinterpret_cast( + start + read32(&ldc->dataoff, isBig)); + dataInCodeSize = read32(&ldc->datasize, isBig); } break; case LC_LOAD_DYLIB: @@ -457,7 +455,7 @@ case LC_LOAD_UPWARD_DYLIB: { const dylib_command *dl = 
reinterpret_cast(lc); DependentDylib entry; - entry.path = lc + read32(swap, dl->dylib.name); + entry.path = lc + read32(&dl->dylib.name, isBig); entry.kind = LoadCommandType(cmd); f->dependentDylibs.push_back(entry); } @@ -476,9 +474,10 @@ // Convert on-disk data_in_code_entry array to DataInCode vector. for (unsigned i=0; i < dataInCodeSize/sizeof(data_in_code_entry); ++i) { DataInCode entry; - entry.offset = read32(swap, dataInCode[i].offset); - entry.length = read16(swap, dataInCode[i].length); - entry.kind = (DataRegionType)read16(swap, dataInCode[i].kind); + entry.offset = read32(&dataInCode[i].offset, isBig); + entry.length = read16(&dataInCode[i].length, isBig); + entry.kind = + (DataRegionType)read16((uint8_t *)&dataInCode[i].kind, isBig); f->dataInCode.push_back(entry); } } Index: lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h =================================================================== --- lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h +++ lib/ReaderWriter/MachO/MachONormalizedFileBinaryUtils.h @@ -13,6 +13,7 @@ #include "lld/Core/LLVM.h" #include "llvm/ADT/StringRef.h" #include "llvm/Support/Casting.h" +#include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/Host.h" #include "llvm/Support/MachO.h" @@ -27,28 +28,54 @@ using llvm::sys::getSwappedBytes; -inline uint16_t read16(bool swap, uint16_t value) { - return (swap ? getSwappedBytes(value) : value); + using llvm::support::ubig16_t; + using llvm::support::ubig32_t; + using llvm::support::ubig64_t; + + using llvm::support::ulittle16_t; + using llvm::support::ulittle32_t; + using llvm::support::ulittle64_t; + +template +static inline uint16_t read16(const T *loc, bool isBig) { + assert((uint64_t)loc % llvm::alignOf() == 0 && + "invalid pointer alignment"); + return isBig ? *(ubig16_t *)loc : *(ulittle16_t *)loc; } -inline uint32_t read32(bool swap, uint32_t value) { - return (swap ? 
getSwappedBytes(value) : value); +template +static inline uint32_t read32(const T *loc, bool isBig) { + assert((uint64_t)loc % llvm::alignOf() == 0 && + "invalid pointer alignment"); + return isBig ? *(ubig32_t *)loc : *(ulittle32_t *)loc; } -inline uint64_t read64(bool swap, uint64_t value) { - return (swap ? getSwappedBytes(value) : value); +template +static inline uint64_t read64(const T *loc, bool isBig) { + assert((uint64_t)loc % llvm::alignOf() == 0 && + "invalid pointer alignment"); + return isBig ? *(ubig64_t *)loc : *(ulittle64_t *)loc; } -inline void write16(int16_t &loc, bool swap, int16_t value) { - loc = (swap ? getSwappedBytes(value) : value); +inline void write16(uint8_t *loc, uint16_t value, bool isBig) { + if (isBig) + *(ubig16_t *)loc = value; + else + *(ulittle16_t *)loc = value; } -inline void write32(int32_t &loc, bool swap, int32_t value) { - loc = (swap ? getSwappedBytes(value) : value); +inline void write32(uint8_t *loc, uint32_t value, bool isBig) { + if (isBig) + *(ubig32_t *)loc = value; + else + *(ulittle32_t *)loc = value; } -inline void write64(uint64_t &loc, bool swap, uint64_t value) { - loc = (swap ? 
getSwappedBytes(value) : value); +inline void write64(uint8_t *loc, uint64_t value, bool isBig) { + if (isBig) + *(ubig64_t *)loc = value; + else + *(ulittle64_t *)loc = value; } inline uint32_t @@ -69,11 +96,10 @@ bits |= (newBits << shift); } -inline Relocation -unpackRelocation(const llvm::MachO::any_relocation_info &r, bool swap, - bool isBigEndian) { - uint32_t r0 = read32(swap, r.r_word0); - uint32_t r1 = read32(swap, r.r_word1); +inline Relocation unpackRelocation(const llvm::MachO::any_relocation_info &r, + bool isBigEndian) { + uint32_t r0 = read32(&r.r_word0, isBigEndian); + uint32_t r1 = read32(&r.r_word1, isBigEndian); Relocation result; if (r0 & llvm::MachO::R_SCATTERED) { Index: lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp =================================================================== --- lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp +++ lib/ReaderWriter/MachO/MachONormalizedFileToAtoms.cpp @@ -350,7 +350,7 @@ const NormalizedFile &normalizedFile, MachOFile &file, bool copyRefs) { const bool is64 = MachOLinkingContext::is64Bit(normalizedFile.arch); - const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch); + const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch); // Get info on how to atomize section. unsigned int sizeMultiple; @@ -408,7 +408,7 @@ case atomizeCFI: // Break section up into dwarf unwind CFIs (FDE or CIE). cfi = reinterpret_cast(§ion.content[offset]); - size = read32(swap, *cfi) + 4; + size = read32(§ion.content[offset], isBig) + 4; if (offset+size > section.content.size()) { return make_dynamic_error_code(Twine(Twine("Section ") + section.segmentName @@ -556,7 +556,7 @@ } }; - const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch); + const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch); // Use old-school iterator so that paired relocations can be grouped. 
for (auto it=section.relocations.begin(), e=section.relocations.end(); it != e; ++it) { @@ -579,17 +579,16 @@ std::error_code relocErr; if (handler.isPairedReloc(reloc)) { // Handle paired relocations together. - relocErr = handler.getPairReferenceInfo(reloc, *++it, inAtom, - offsetInAtom, fixupAddress, swap, - atomByAddr, atomBySymbol, &kind, - &target, &addend); + relocErr = handler.getPairReferenceInfo( + reloc, *++it, inAtom, offsetInAtom, fixupAddress, isBig, atomByAddr, + atomBySymbol, &kind, &target, &addend); } else { // Use ArchHandler to convert relocation record into information // needed to instantiate an lld::Reference object. - relocErr = handler.getReferenceInfo(reloc, inAtom, offsetInAtom, - fixupAddress,swap, atomByAddr, - atomBySymbol, &kind, &target, &addend); + relocErr = handler.getReferenceInfo( + reloc, inAtom, offsetInAtom, fixupAddress, isBig, atomByAddr, + atomBySymbol, &kind, &target, &addend); } if (relocErr) { return make_dynamic_error_code( @@ -621,18 +620,18 @@ return section.segmentName.equals("__DWARF"); } -static int64_t readSPtr(bool is64, bool swap, const uint8_t *addr) { +static int64_t readSPtr(bool is64, bool isBig, const uint8_t *addr) { if (is64) - return read64(swap, *reinterpret_cast(addr)); + return read64(addr, isBig); - int32_t res = read32(swap, *reinterpret_cast(addr)); + int32_t res = read32(addr, isBig); return res; } std::error_code addEHFrameReferences(const NormalizedFile &normalizedFile, MachOFile &file, mach_o::ArchHandler &handler) { - const bool swap = !MachOLinkingContext::isHostEndian(normalizedFile.arch); + const bool isBig = MachOLinkingContext::isBigEndian(normalizedFile.arch); const bool is64 = MachOLinkingContext::is64Bit(normalizedFile.arch); const Section *ehFrameSection = nullptr; @@ -651,7 +650,7 @@ [&](MachODefinedAtom *atom, uint64_t offset) -> void { assert(atom->contentType() == DefinedAtom::typeCFI); - if (ArchHandler::isDwarfCIE(swap, atom)) + if (ArchHandler::isDwarfCIE(isBig, atom)) return; 
// Compiler wasn't lazy and actually told us what it meant. @@ -659,14 +658,14 @@ return; const uint8_t *frameData = atom->rawContent().data(); - uint32_t size = read32(swap, *(uint32_t *)frameData); + uint32_t size = read32(frameData, isBig); uint64_t cieFieldInFDE = size == 0xffffffffU ? sizeof(uint32_t) + sizeof(uint64_t) : sizeof(uint32_t); // Linker needs to fixup a reference from the FDE to its parent CIE (a // 32-bit byte offset backwards in the __eh_frame section). - uint32_t cieDelta = read32(swap, *(uint32_t *)(frameData + cieFieldInFDE)); + uint32_t cieDelta = read32(frameData + cieFieldInFDE, isBig); uint64_t cieAddress = ehFrameSection->address + offset + cieFieldInFDE; cieAddress -= cieDelta; @@ -682,7 +681,7 @@ // (hopefully) uint64_t rangeFieldInFDE = cieFieldInFDE + sizeof(uint32_t); - int64_t functionFromFDE = readSPtr(is64, swap, frameData + rangeFieldInFDE); + int64_t functionFromFDE = readSPtr(is64, isBig, frameData + rangeFieldInFDE); uint64_t rangeStart = ehFrameSection->address + offset + rangeFieldInFDE; rangeStart += functionFromFDE;