diff --git a/lld/MachO/Arch/ARM64.cpp b/lld/MachO/Arch/ARM64.cpp
--- a/lld/MachO/Arch/ARM64.cpp
+++ b/lld/MachO/Arch/ARM64.cpp
@@ -39,7 +39,6 @@
                             uint64_t entryAddr) const override;
 
   void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
-  const RelocAttrs &getRelocAttrs(uint8_t type) const override;
   uint64_t getPageSize() const override { return 16 * 1024; }
 };
 
@@ -52,30 +51,24 @@
 // are weird -- it results in the value of the GOT slot being written, instead
 // of the address. Let's not support it unless we find a real-world use case.
 
-const RelocAttrs &ARM64::getRelocAttrs(uint8_t type) const {
-  static const std::array<RelocAttrs, 11> relocAttrsArray{{
+static const std::array<RelocAttrs, 11> ARM64RelocAttrsArray{{
 #define B(x) RelocAttrBits::x
-      {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) |
-                       B(DYSYM8) | B(BYTE4) | B(BYTE8)},
-      {"SUBTRACTOR", B(SUBTRAHEND) | B(BYTE4) | B(BYTE8)},
-      {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
-      {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
-      {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
-      {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
-      {"GOT_LOAD_PAGEOFF12",
-       B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
-      {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
-      {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
-      {"TLVP_LOAD_PAGEOFF12",
-       B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
-      {"ADDEND", B(ADDEND)},
+    {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(DYSYM8) |
+                     B(BYTE4) | B(BYTE8)},
+    {"SUBTRACTOR", B(SUBTRAHEND) | B(BYTE4) | B(BYTE8)},
+    {"BRANCH26", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+    {"PAGE21", B(PCREL) | B(EXTERN) | B(BYTE4)},
+    {"PAGEOFF12", B(ABSOLUTE) | B(EXTERN) | B(BYTE4)},
+    {"GOT_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(GOT) | B(BYTE4)},
+    {"GOT_LOAD_PAGEOFF12",
+     B(ABSOLUTE) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
+    {"POINTER_TO_GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
+    {"TLVP_LOAD_PAGE21", B(PCREL) | B(EXTERN) | B(TLV) | B(BYTE4)},
+    {"TLVP_LOAD_PAGEOFF12",
+     B(ABSOLUTE) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
+    {"ADDEND", B(ADDEND)},
 #undef B
-  }};
-  assert(type < relocAttrsArray.size() && "invalid relocation type");
-  if (type >= relocAttrsArray.size())
-    return invalidRelocAttrs;
-  return relocAttrsArray[type];
-}
+}};
 
 int64_t ARM64::getEmbeddedAddend(MemoryBufferRef mb, const section_64 &sec,
                                  const relocation_info rel) const {
@@ -298,6 +291,9 @@
   stubSize = sizeof(stubCode);
   stubHelperHeaderSize = sizeof(stubHelperHeaderCode);
   stubHelperEntrySize = sizeof(stubHelperEntryCode);
+
+  relocAttrsArray = ARM64RelocAttrsArray.data();
+  relocAttrsArraySize = ARM64RelocAttrsArray.size();
 }
 
 TargetInfo *macho::createARM64TargetInfo() {
diff --git a/lld/MachO/Arch/X86_64.cpp b/lld/MachO/Arch/X86_64.cpp
--- a/lld/MachO/Arch/X86_64.cpp
+++ b/lld/MachO/Arch/X86_64.cpp
@@ -36,33 +36,26 @@
                             uint64_t entryAddr) const override;
 
   void relaxGotLoad(uint8_t *loc, uint8_t type) const override;
-  const RelocAttrs &getRelocAttrs(uint8_t type) const override;
   uint64_t getPageSize() const override { return 4 * 1024; }
 };
 
 } // namespace
 
-const RelocAttrs &X86_64::getRelocAttrs(uint8_t type) const {
-  static const std::array<RelocAttrs, 10> relocAttrsArray{{
+static const std::array<RelocAttrs, 10> X86_64RelocAttrsArray{{
 #define B(x) RelocAttrBits::x
-      {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) |
-                       B(DYSYM8) | B(BYTE4) | B(BYTE8)},
-      {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
-      {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
-      {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
-      {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
-      {"SUBTRACTOR", B(SUBTRAHEND) | B(BYTE4) | B(BYTE8)},
-      {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
-      {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
-      {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
-      {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
+    {"UNSIGNED", B(UNSIGNED) | B(ABSOLUTE) | B(EXTERN) | B(LOCAL) | B(DYSYM8) |
+                     B(BYTE4) | B(BYTE8)},
+    {"SIGNED", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+    {"BRANCH", B(PCREL) | B(EXTERN) | B(BRANCH) | B(BYTE4)},
+    {"GOT_LOAD", B(PCREL) | B(EXTERN) | B(GOT) | B(LOAD) | B(BYTE4)},
+    {"GOT", B(PCREL) | B(EXTERN) | B(GOT) | B(POINTER) | B(BYTE4)},
+    {"SUBTRACTOR", B(SUBTRAHEND) | B(BYTE4) | B(BYTE8)},
+    {"SIGNED_1", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+    {"SIGNED_2", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+    {"SIGNED_4", B(PCREL) | B(EXTERN) | B(LOCAL) | B(BYTE4)},
+    {"TLV", B(PCREL) | B(EXTERN) | B(TLV) | B(LOAD) | B(BYTE4)},
 #undef B
-  }};
-  assert(type < relocAttrsArray.size() && "invalid relocation type");
-  if (type >= relocAttrsArray.size())
-    return invalidRelocAttrs;
-  return relocAttrsArray[type];
-}
+}};
 
 static int pcrelOffset(uint8_t type) {
   switch (type) {
@@ -189,6 +182,9 @@
   stubSize = sizeof(stub);
   stubHelperHeaderSize = sizeof(stubHelperHeader);
   stubHelperEntrySize = sizeof(stubHelperEntry);
+
+  relocAttrsArray = X86_64RelocAttrsArray.data();
+  relocAttrsArraySize = X86_64RelocAttrsArray.size();
 }
 
 TargetInfo *macho::createX86_64TargetInfo() {
diff --git a/lld/MachO/InputFiles.cpp b/lld/MachO/InputFiles.cpp
--- a/lld/MachO/InputFiles.cpp
+++ b/lld/MachO/InputFiles.cpp
@@ -203,7 +203,7 @@
 
 static bool validateRelocationInfo(InputFile *file, const section_64 &sec,
                                    relocation_info rel) {
-  const RelocAttrs &relocAttrs = target->getRelocAttrs(rel.r_type);
+  const RelocAttrs &relocAttrs = getRelocAttrs(rel.r_type);
   bool valid = true;
   auto message = [relocAttrs, file, sec, rel, &valid](const Twine &diagnostic) {
     valid = false;
@@ -266,7 +266,7 @@
 
     int64_t pairedAddend = 0;
     relocation_info relInfo = relInfos[i];
-    if (target->hasAttr(relInfo.r_type, RelocAttrBits::ADDEND)) {
+    if (hasRelocAttr(relInfo.r_type, RelocAttrBits::ADDEND)) {
      pairedAddend = SignExtend64<24>(relInfo.r_symbolnum);
       relInfo = relInfos[++i];
     }
@@ -298,7 +298,7 @@
       // FIXME This logic was written around x86_64 behavior -- ARM64 doesn't
       // have pcrel section relocations. We may want to factor this out into
       // the arch-specific .cpp file.
-      assert(target->hasAttr(r.type, RelocAttrBits::BYTE4));
+      assert(hasRelocAttr(r.type, RelocAttrBits::BYTE4));
       referentOffset =
           sec.addr + relInfo.r_address + 4 + totalAddend - referentSec.addr;
     } else {
@@ -312,11 +312,11 @@
     InputSection *subsec = findContainingSubsection(subsecMap, &r.offset);
     subsec->relocs.push_back(r);
 
-    if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
+    if (hasRelocAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
       relInfo = relInfos[++i];
       // SUBTRACTOR relocations should always be followed by an UNSIGNED one
       // indicating the minuend symbol.
-      assert(target->hasAttr(relInfo.r_type, RelocAttrBits::UNSIGNED) &&
+      assert(hasRelocAttr(relInfo.r_type, RelocAttrBits::UNSIGNED) &&
              relInfo.r_extern);
       Reloc p;
       p.type = relInfo.r_type;
diff --git a/lld/MachO/InputSection.cpp b/lld/MachO/InputSection.cpp
--- a/lld/MachO/InputSection.cpp
+++ b/lld/MachO/InputSection.cpp
@@ -36,7 +36,7 @@
 
 static uint64_t resolveSymbolVA(uint8_t *loc, const lld::macho::Symbol &sym,
                                 uint8_t type) {
-  const RelocAttrs &relocAttrs = target->getRelocAttrs(type);
+  const RelocAttrs &relocAttrs = getRelocAttrs(type);
   if (relocAttrs.hasAttr(RelocAttrBits::BRANCH)) {
     if (sym.isInStubs())
       return in.stubs->addr + sym.stubsIndex * target->stubSize;
@@ -61,13 +61,12 @@
     const Reloc &r = relocs[i];
     uint8_t *loc = buf + r.offset;
     uint64_t referentVA = 0;
-    if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
+    if (hasRelocAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
       const Symbol *fromSym = r.referent.get<Symbol *>();
       const Symbol *toSym = relocs[++i].referent.get<Symbol *>();
       referentVA = toSym->getVA() - fromSym->getVA();
     } else if (auto *referentSym = r.referent.dyn_cast<Symbol *>()) {
-      if (target->hasAttr(r.type, RelocAttrBits::LOAD) &&
-          !referentSym->isInGot())
+      if (hasRelocAttr(r.type, RelocAttrBits::LOAD) && !referentSym->isInGot())
         target->relaxGotLoad(loc, r.type);
       referentVA = resolveSymbolVA(loc, *referentSym, r.type);
 
diff --git a/lld/MachO/Relocations.cpp b/lld/MachO/Relocations.cpp
--- a/lld/MachO/Relocations.cpp
+++ b/lld/MachO/Relocations.cpp
@@ -19,7 +19,7 @@
 
 bool macho::validateSymbolRelocation(const Symbol *sym,
                                      const InputSection *isec, const Reloc &r) {
-  const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
+  const RelocAttrs &relocAttrs = getRelocAttrs(r.type);
   bool valid = true;
   auto message = [relocAttrs, sym, isec, &valid](const Twine &diagnostic) {
     valid = false;
@@ -45,9 +45,8 @@
   if (auto *sym = r.referent.dyn_cast<Symbol *>())
     hint = "; references " + toString(*sym);
   // TODO: get location of reloc using something like LLD-ELF's getErrorPlace()
-  error("relocation " + target->getRelocAttrs(r.type).name +
-        " is out of range: " + v + " is not in [" + Twine(min) + ", " +
-        Twine(max) + "]" + hint);
+  error("relocation " + getRelocAttrs(r.type).name + " is out of range: " + v +
+        " is not in [" + Twine(min) + ", " + Twine(max) + "]" + hint);
 }
 
 void macho::reportRangeError(SymbolDiagnostic d, const Twine &v, uint8_t bits,
diff --git a/lld/MachO/Target.h b/lld/MachO/Target.h
--- a/lld/MachO/Target.h
+++ b/lld/MachO/Target.h
@@ -13,6 +13,7 @@
 
 #include "llvm/ADT/BitmaskEnum.h"
 #include "llvm/BinaryFormat/MachO.h"
+#include "llvm/Support/Compiler.h"
 #include "llvm/Support/MemoryBuffer.h"
 
 #include <cstdint>
@@ -59,14 +60,8 @@
   // on a level of address indirection.
   virtual void relaxGotLoad(uint8_t *loc, uint8_t type) const = 0;
 
-  virtual const RelocAttrs &getRelocAttrs(uint8_t type) const = 0;
-
   virtual uint64_t getPageSize() const = 0;
 
-  bool hasAttr(uint8_t type, RelocAttrBits bit) const {
-    return getRelocAttrs(type).hasAttr(bit);
-  }
-
   bool validateRelocationInfo(llvm::MemoryBufferRef,
                               const llvm::MachO::section_64 &sec,
                               llvm::MachO::relocation_info);
@@ -85,6 +80,22 @@
 
 extern TargetInfo *target;
 
+// These values are target-specific, but we have hoisted them out of Target
+// because they are accessed in hot loops. Avoiding the extra indirection is a
+// perf win.
+extern size_t relocAttrsArraySize;
+extern const RelocAttrs *relocAttrsArray;
+
+inline const RelocAttrs &getRelocAttrs(uint8_t type) {
+  if (LLVM_UNLIKELY(type >= relocAttrsArraySize))
+    return invalidRelocAttrs;
+  return relocAttrsArray[type];
+}
+
+inline bool hasRelocAttr(uint8_t type, RelocAttrBits bit) {
+  return getRelocAttrs(type).hasAttr(bit);
+}
+
 } // namespace macho
 } // namespace lld
 
diff --git a/lld/MachO/Target.cpp b/lld/MachO/Target.cpp
--- a/lld/MachO/Target.cpp
+++ b/lld/MachO/Target.cpp
@@ -12,3 +12,5 @@
 using namespace lld::macho;
 
 TargetInfo *macho::target = nullptr;
+size_t macho::relocAttrsArraySize = 0;
+const RelocAttrs *macho::relocAttrsArray = nullptr;
diff --git a/lld/MachO/UnwindInfoSection.cpp b/lld/MachO/UnwindInfoSection.cpp
--- a/lld/MachO/UnwindInfoSection.cpp
+++ b/lld/MachO/UnwindInfoSection.cpp
@@ -113,7 +113,7 @@
          isec->name == section_names::compactUnwind);
 
   for (Reloc &r : isec->relocs) {
-    assert(target->hasAttr(r.type, RelocAttrBits::UNSIGNED));
+    assert(hasRelocAttr(r.type, RelocAttrBits::UNSIGNED));
     if (r.offset % sizeof(CompactUnwindEntry64) !=
         offsetof(struct CompactUnwindEntry64, personality))
       continue;
diff --git a/lld/MachO/Writer.cpp b/lld/MachO/Writer.cpp
--- a/lld/MachO/Writer.cpp
+++ b/lld/MachO/Writer.cpp
@@ -444,7 +444,7 @@
 
 static void prepareSymbolRelocation(lld::macho::Symbol *sym,
                                     const InputSection *isec, const Reloc &r) {
-  const RelocAttrs &relocAttrs = target->getRelocAttrs(r.type);
+  const RelocAttrs &relocAttrs = getRelocAttrs(r.type);
 
   if (relocAttrs.hasAttr(RelocAttrBits::BRANCH)) {
     prepareBranchTarget(sym);
@@ -472,7 +472,7 @@
 
   for (auto it = isec->relocs.begin(); it != isec->relocs.end(); ++it) {
     Reloc &r = *it;
-    if (target->hasAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
+    if (hasRelocAttr(r.type, RelocAttrBits::SUBTRAHEND)) {
       // Skip over the following UNSIGNED relocation -- it's just there as the
       // minuend, and doesn't have the usual UNSIGNED semantics. We don't want
      // to emit rebase opcodes for it.