diff --git a/lld/ELF/Arch/PPC64.cpp b/lld/ELF/Arch/PPC64.cpp index 0764dabe45f8..cf58b322bb3a 100644 --- a/lld/ELF/Arch/PPC64.cpp +++ b/lld/ELF/Arch/PPC64.cpp @@ -1,1234 +1,1239 @@ //===- PPC64.cpp ----------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===----------------------------------------------------------------------===// #include "SymbolTable.h" #include "Symbols.h" #include "SyntheticSections.h" #include "Target.h" #include "Thunks.h" #include "lld/Common/ErrorHandler.h" #include "lld/Common/Memory.h" #include "llvm/Support/Endian.h" using namespace llvm; using namespace llvm::object; using namespace llvm::support::endian; using namespace llvm::ELF; using namespace lld; using namespace lld::elf; static uint64_t ppc64TocOffset = 0x8000; static uint64_t dynamicThreadPointerOffset = 0x8000; // The instruction encoding of bits 21-30 from the ISA for the Xform and Dform // instructions that can be used as part of the initial exec TLS sequence. enum XFormOpcd { LBZX = 87, LHZX = 279, LWZX = 23, LDX = 21, STBX = 215, STHX = 407, STWX = 151, STDX = 149, ADD = 266, }; enum DFormOpcd { LBZ = 34, LBZU = 35, LHZ = 40, LHZU = 41, LHAU = 43, LWZ = 32, LWZU = 33, LFSU = 49, LD = 58, LFDU = 51, STB = 38, STBU = 39, STH = 44, STHU = 45, STW = 36, STWU = 37, STFSU = 53, STFDU = 55, STD = 62, ADDI = 14 }; uint64_t elf::getPPC64TocBase() { // The TOC consists of sections .got, .toc, .tocbss, .plt in that order. The // TOC starts where the first of these sections starts. We always create a // .got when we see a relocation that uses it, so for us the start is always // the .got. uint64_t tocVA = in.got->getVA(); // Per the ppc64-elf-linux ABI, The TOC base is TOC value plus 0x8000 // thus permitting a full 64 Kbytes segment. 
Note that the glibc startup // code (crt1.o) assumes that you can get from the TOC base to the // start of the .toc section with only a single (signed) 16-bit relocation. return tocVA + ppc64TocOffset; } unsigned elf::getPPC64GlobalEntryToLocalEntryOffset(uint8_t stOther) { // The offset is encoded into the 3 most significant bits of the st_other // field, with some special values described in section 3.4.1 of the ABI: // 0 --> Zero offset between the GEP and LEP, and the function does NOT use // the TOC pointer (r2). r2 will hold the same value on returning from // the function as it did on entering the function. // 1 --> Zero offset between the GEP and LEP, and r2 should be treated as a // caller-saved register for all callers. // 2-6 --> The binary logarithm of the offset eg: // 2 --> 2^2 = 4 bytes --> 1 instruction. // 6 --> 2^6 = 64 bytes --> 16 instructions. // 7 --> Reserved. uint8_t gepToLep = (stOther >> 5) & 7; if (gepToLep < 2) return 0; // The value encoded in the st_other bits is the // log-base-2(offset). if (gepToLep < 7) return 1 << gepToLep; error("reserved value of 7 in the 3 most-significant-bits of st_other"); return 0; } bool elf::isPPC64SmallCodeModelTocReloc(RelType type) { // The only small code model relocations that access the .toc section. return type == R_PPC64_TOC16 || type == R_PPC64_TOC16_DS; } static bool addOptional(StringRef name, uint64_t value, std::vector &defined) { Symbol *sym = symtab->find(name); if (!sym || sym->isDefined()) return false; sym->resolve(Defined{/*file=*/nullptr, saver.save(name), STB_GLOBAL, STV_HIDDEN, STT_FUNC, value, /*size=*/0, /*section=*/nullptr}); defined.push_back(cast(sym)); return true; } // If from is 14, write ${prefix}14: firstInsn; ${prefix}15: // firstInsn+0x200008; ...; ${prefix}31: firstInsn+(31-14)*0x200008; $tail // The labels are defined only if they exist in the symbol table. 
static void writeSequence(MutableArrayRef buf, const char *prefix, int from, uint32_t firstInsn, ArrayRef tail) { std::vector defined; char name[16]; int first; uint32_t *ptr = buf.data(); for (int r = from; r < 32; ++r) { format("%s%d", prefix, r).snprint(name, sizeof(name)); if (addOptional(name, 4 * (r - from), defined) && defined.size() == 1) first = r - from; write32(ptr++, firstInsn + 0x200008 * (r - from)); } for (uint32_t insn : tail) write32(ptr++, insn); assert(ptr == &*buf.end()); if (defined.empty()) return; // The full section content has the extent of [begin, end). We drop unused // instructions and write [first,end). auto *sec = make( nullptr, SHF_ALLOC, SHT_PROGBITS, 4, makeArrayRef(reinterpret_cast(buf.data() + first), 4 * (buf.size() - first)), ".text"); inputSections.push_back(sec); for (Defined *sym : defined) { sym->section = sec; sym->value -= 4 * first; } } // Implements some save and restore functions as described by ELF V2 ABI to be // compatible with GCC. With GCC -Os, when the number of call-saved registers // exceeds a certain threshold, GCC generates _savegpr0_* _restgpr0_* calls and // expects the linker to define them. See // https://sourceware.org/pipermail/binutils/2002-February/017444.html and // https://sourceware.org/pipermail/binutils/2004-August/036765.html . This is // weird because libgcc.a would be the natural place. The linker generation // approach has the advantage that the linker can generate multiple copies to // avoid long branch thunks. However, we don't consider the advantage // significant enough to complicate our trunk implementation, so we take the // simple approach and synthesize .text sections providing the implementation. void elf::addPPC64SaveRestore() { static uint32_t savegpr0[20], restgpr0[21], savegpr1[19], restgpr1[19]; constexpr uint32_t blr = 0x4e800020, mtlr_0 = 0x7c0803a6; // _restgpr0_14: ld 14, -144(1); _restgpr0_15: ld 15, -136(1); ... 
// Tail: ld 0, 16(1); mtlr 0; blr writeSequence(restgpr0, "_restgpr0_", 14, 0xe9c1ff70, {0xe8010010, mtlr_0, blr}); // _restgpr1_14: ld 14, -144(12); _restgpr1_15: ld 15, -136(12); ... // Tail: blr writeSequence(restgpr1, "_restgpr1_", 14, 0xe9ccff70, {blr}); // _savegpr0_14: std 14, -144(1); _savegpr0_15: std 15, -136(1); ... // Tail: std 0, 16(1); blr writeSequence(savegpr0, "_savegpr0_", 14, 0xf9c1ff70, {0xf8010010, blr}); // _savegpr1_14: std 14, -144(12); _savegpr1_15: std 15, -136(12); ... // Tail: blr writeSequence(savegpr1, "_savegpr1_", 14, 0xf9ccff70, {blr}); } // Find the R_PPC64_ADDR64 in .rela.toc with matching offset. template static std::pair getRelaTocSymAndAddend(InputSectionBase *tocSec, uint64_t offset) { if (tocSec->numRelocations == 0) return {}; // .rela.toc contains exclusively R_PPC64_ADDR64 relocations sorted by // r_offset: 0, 8, 16, etc. For a given Offset, Offset / 8 gives us the // relocation index in most cases. // // In rare cases a TOC entry may store a constant that doesn't need an // R_PPC64_ADDR64, the corresponding r_offset is therefore missing. Offset / 8 // points to a relocation with larger r_offset. Do a linear probe then. // Constants are extremely uncommon in .toc and the extra number of array // accesses can be seen as a small constant. 
ArrayRef relas = tocSec->template relas(); uint64_t index = std::min(offset / 8, relas.size() - 1); for (;;) { if (relas[index].r_offset == offset) { Symbol &sym = tocSec->getFile()->getRelocTargetSym(relas[index]); return {dyn_cast(&sym), getAddend(relas[index])}; } if (relas[index].r_offset < offset || index == 0) break; --index; } return {}; } // When accessing a symbol defined in another translation unit, compilers // reserve a .toc entry, allocate a local label and generate toc-indirect // instructions: // // addis 3, 2, .LC0@toc@ha # R_PPC64_TOC16_HA // ld 3, .LC0@toc@l(3) # R_PPC64_TOC16_LO_DS, load the address from a .toc entry // ld/lwa 3, 0(3) # load the value from the address // // .section .toc,"aw",@progbits // .LC0: .tc var[TC],var // // If var is defined, non-preemptable and addressable with a 32-bit signed // offset from the toc base, the address of var can be computed by adding an // offset to the toc base, saving a load. // // addis 3,2,var@toc@ha # this may be relaxed to a nop, // addi 3,3,var@toc@l # then this becomes addi 3,2,var@toc // ld/lwa 3, 0(3) # load the value from the address // // Returns true if the relaxation is performed. bool elf::tryRelaxPPC64TocIndirection(const Relocation &rel, uint8_t *bufLoc) { assert(config->tocOptimize); if (rel.addend < 0) return false; // If the symbol is not the .toc section, this isn't a toc-indirection. Defined *defSym = dyn_cast(rel.sym); if (!defSym || !defSym->isSection() || defSym->section->name != ".toc") return false; Defined *d; int64_t addend; auto *tocISB = cast(defSym->section); std::tie(d, addend) = config->isLE ? getRelaTocSymAndAddend(tocISB, rel.addend) : getRelaTocSymAndAddend(tocISB, rel.addend); // Only non-preemptable defined symbols can be relaxed. if (!d || d->isPreemptible) return false; // R_PPC64_ADDR64 should have created a canonical PLT for the non-preemptable // ifunc and changed its type to STT_FUNC. 
assert(!d->isGnuIFunc()); // Two instructions can materialize a 32-bit signed offset from the toc base. uint64_t tocRelative = d->getVA(addend) - getPPC64TocBase(); if (!isInt<32>(tocRelative)) return false; // Add PPC64TocOffset that will be subtracted by PPC64::relocate(). target->relaxGot(bufLoc, rel, tocRelative + ppc64TocOffset); return true; } namespace { class PPC64 final : public TargetInfo { public: PPC64(); int getTlsGdRelaxSkip(RelType type) const override; uint32_t calcEFlags() const override; RelExpr getRelExpr(RelType type, const Symbol &s, const uint8_t *loc) const override; RelType getDynRel(RelType type) const override; void writePltHeader(uint8_t *buf) const override; void writePlt(uint8_t *buf, const Symbol &sym, uint64_t pltEntryAddr) const override; void writeIplt(uint8_t *buf, const Symbol &sym, uint64_t pltEntryAddr) const override; void relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const override; void writeGotHeader(uint8_t *buf) const override; bool needsThunk(RelExpr expr, RelType type, const InputFile *file, uint64_t branchAddr, const Symbol &s, int64_t a) const override; uint32_t getThunkSectionSpacing() const override; bool inBranchRange(RelType type, uint64_t src, uint64_t dst) const override; RelExpr adjustRelaxExpr(RelType type, const uint8_t *data, RelExpr expr) const override; void relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const override; void relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const override; void relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const override; void relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const override; void relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const override; bool adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end, uint8_t stOther) const override; }; } // namespace // Relocation masks following the #lo(value), #hi(value), #ha(value), // #higher(value), #highera(value), 
// #highest(value), and #highesta(value)
// macros defined in section 4.5.1. Relocation Types of the PPC-elf64abi
// document.
static uint16_t lo(uint64_t v) { return v; }
static uint16_t hi(uint64_t v) { return v >> 16; }
// ha() rounds to the nearest high halfword: the +0x8000 compensates for the
// sign extension a subsequent lo() addition performs.
static uint16_t ha(uint64_t v) { return (v + 0x8000) >> 16; }
static uint16_t higher(uint64_t v) { return v >> 32; }
static uint16_t highera(uint64_t v) { return (v + 0x8000) >> 32; }
static uint16_t highest(uint64_t v) { return v >> 48; }
static uint16_t highesta(uint64_t v) { return (v + 0x8000) >> 48; }

// Extracts the 'PO' field of an instruction encoding.
static uint8_t getPrimaryOpCode(uint32_t encoding) { return (encoding >> 26); }

static bool isDQFormInstruction(uint32_t encoding) {
  switch (getPrimaryOpCode(encoding)) {
  default:
    return false;
  case 56:
    // The only instruction with a primary opcode of 56 is `lq`.
    return true;
  case 61:
    // There are both DS and DQ instruction forms with this primary opcode.
    // Namely `lxv` and `stxv` are the DQ-forms that use it.
    // The DS 'XO' bits being set to 01 is restricted to DQ form.
    return (encoding & 3) == 0x1;
  }
}

// Returns true for the update-form (U-suffixed) loads/stores, which also write
// back the effective address to the base register.
static bool isInstructionUpdateForm(uint32_t encoding) {
  switch (getPrimaryOpCode(encoding)) {
  default:
    return false;
  case LBZU:
  case LHAU:
  case LHZU:
  case LWZU:
  case LFSU:
  case LFDU:
  case STBU:
  case STHU:
  case STWU:
  case STFSU:
  case STFDU:
    return true;
    // LWA has the same opcode as LD, and the DS bits is what differentiates
    // between LD/LDU/LWA
  case LD:
  case STD:
    return (encoding & 3) == 1;
  }
}

// There are a number of places when we either want to read or write an
// instruction when handling a half16 relocation type. On big-endian the buffer
// pointer is pointing into the middle of the word we want to extract, and on
// little-endian it is pointing to the start of the word. These 2 helpers are to
// simplify reading and writing in that context.
static void writeFromHalf16(uint8_t *loc, uint32_t insn) {
  write32(config->isLE ? loc : loc - 2, insn);
}

static uint32_t readFromHalf16(const uint8_t *loc) {
  return read32(config->isLE ? loc : loc - 2);
}

// The prefixed instruction is always a 4 byte prefix followed by a 4 byte
// instruction. Therefore, the prefix is always in lower memory than the
// instruction (regardless of endianness).
// As a result, we need to shift the pieces around on little endian machines.
static void writePrefixedInstruction(uint8_t *loc, uint64_t insn) {
  insn = config->isLE ? insn << 32 | insn >> 32 : insn;
  write64(loc, insn);
}

static uint64_t readPrefixedInstruction(const uint8_t *loc) {
  uint64_t fullInstr = read64(loc);
  return config->isLE ? (fullInstr << 32 | fullInstr >> 32) : fullInstr;
}

PPC64::PPC64() {
  copyRel = R_PPC64_COPY;
  gotRel = R_PPC64_GLOB_DAT;
  noneRel = R_PPC64_NONE;
  pltRel = R_PPC64_JMP_SLOT;
  relativeRel = R_PPC64_RELATIVE;
  iRelativeRel = R_PPC64_IRELATIVE;
  symbolicRel = R_PPC64_ADDR64;
  pltHeaderSize = 60;
  pltEntrySize = 4;
  ipltEntrySize = 16; // PPC64PltCallStub::size
  gotBaseSymInGotPlt = false;
  gotHeaderEntriesNum = 1;
  gotPltHeaderEntriesNum = 2;
  needsThunks = true;

  tlsModuleIndexRel = R_PPC64_DTPMOD64;
  tlsOffsetRel = R_PPC64_DTPREL64;
  tlsGotRel = R_PPC64_TPREL64;

  needsMoreStackNonSplit = false;

  // We need 64K pages (at least under glibc/Linux, the loader won't
  // set different permissions on a finer granularity than that).
  defaultMaxPageSize = 65536;

  // The PPC64 ELF ABI v1 spec, says:
  //
  //   It is normally desirable to put segments with different characteristics
  //   in separate 256 Mbyte portions of the address space, to give the
  //   operating system full paging flexibility in the 64-bit address space.
  //
  // And because the lowest non-zero 256M boundary is 0x10000000, PPC64 linkers
  // use 0x10000000 as the starting address.
defaultImageBase = 0x10000000; write32(trapInstr.data(), 0x7fe00008); } int PPC64::getTlsGdRelaxSkip(RelType type) const { // A __tls_get_addr call instruction is marked with 2 relocations: // // R_PPC64_TLSGD / R_PPC64_TLSLD: marker relocation // R_PPC64_REL24: __tls_get_addr // // After the relaxation we no longer call __tls_get_addr and should skip both // relocations to not create a false dependence on __tls_get_addr being // defined. if (type == R_PPC64_TLSGD || type == R_PPC64_TLSLD) return 2; return 1; } static uint32_t getEFlags(InputFile *file) { if (config->ekind == ELF64BEKind) return cast>(file)->getObj().getHeader()->e_flags; return cast>(file)->getObj().getHeader()->e_flags; } // This file implements v2 ABI. This function makes sure that all // object files have v2 or an unspecified version as an ABI version. uint32_t PPC64::calcEFlags() const { for (InputFile *f : objectFiles) { uint32_t flag = getEFlags(f); if (flag == 1) error(toString(f) + ": ABI version 1 is not supported"); else if (flag > 2) error(toString(f) + ": unrecognized e_flags: " + Twine(flag)); } return 2; } void PPC64::relaxGot(uint8_t *loc, const Relocation &rel, uint64_t val) const { switch (rel.type) { case R_PPC64_TOC16_HA: // Convert "addis reg, 2, .LC0@toc@h" to "addis reg, 2, var@toc@h" or "nop". relocate(loc, rel, val); break; case R_PPC64_TOC16_LO_DS: { // Convert "ld reg, .LC0@toc@l(reg)" to "addi reg, reg, var@toc@l" or // "addi reg, 2, var@toc". uint32_t insn = readFromHalf16(loc); if (getPrimaryOpCode(insn) != LD) error("expected a 'ld' for got-indirect to toc-relative relaxing"); writeFromHalf16(loc, (insn & 0x03ffffff) | 0x38000000); relocateNoSym(loc, R_PPC64_TOC16_LO, val); break; } default: llvm_unreachable("unexpected relocation type"); } } void PPC64::relaxTlsGdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const { // Reference: 3.7.4.2 of the 64-bit ELF V2 abi supplement. 
// The general dynamic code sequence for a global `x` will look like: // Instruction Relocation Symbol // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x // R_PPC64_REL24 __tls_get_addr // nop None None // Relaxing to local exec entails converting: // addis r3, r2, x@got@tlsgd@ha into nop // addi r3, r3, x@got@tlsgd@l into addis r3, r13, x@tprel@ha // bl __tls_get_addr(x@tlsgd) into nop // nop into addi r3, r3, x@tprel@l switch (rel.type) { case R_PPC64_GOT_TLSGD16_HA: writeFromHalf16(loc, 0x60000000); // nop break; case R_PPC64_GOT_TLSGD16: case R_PPC64_GOT_TLSGD16_LO: writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13 relocateNoSym(loc, R_PPC64_TPREL16_HA, val); break; case R_PPC64_TLSGD: write32(loc, 0x60000000); // nop write32(loc + 4, 0x38630000); // addi r3, r3 // Since we are relocating a half16 type relocation and Loc + 4 points to // the start of an instruction we need to advance the buffer by an extra // 2 bytes on BE. relocateNoSym(loc + 4 + (config->ekind == ELF64BEKind ? 2 : 0), R_PPC64_TPREL16_LO, val); break; default: llvm_unreachable("unsupported relocation for TLS GD to LE relaxation"); } } void PPC64::relaxTlsLdToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const { // Reference: 3.7.4.3 of the 64-bit ELF V2 abi supplement. 
// The local dynamic code sequence for a global `x` will look like: // Instruction Relocation Symbol // addis r3, r2, x@got@tlsld@ha R_PPC64_GOT_TLSLD16_HA x // addi r3, r3, x@got@tlsld@l R_PPC64_GOT_TLSLD16_LO x // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSLD x // R_PPC64_REL24 __tls_get_addr // nop None None // Relaxing to local exec entails converting: // addis r3, r2, x@got@tlsld@ha into nop // addi r3, r3, x@got@tlsld@l into addis r3, r13, 0 // bl __tls_get_addr(x@tlsgd) into nop // nop into addi r3, r3, 4096 switch (rel.type) { case R_PPC64_GOT_TLSLD16_HA: writeFromHalf16(loc, 0x60000000); // nop break; case R_PPC64_GOT_TLSLD16_LO: writeFromHalf16(loc, 0x3c6d0000); // addis r3, r13, 0 break; case R_PPC64_TLSLD: write32(loc, 0x60000000); // nop write32(loc + 4, 0x38631000); // addi r3, r3, 4096 break; case R_PPC64_DTPREL16: case R_PPC64_DTPREL16_HA: case R_PPC64_DTPREL16_HI: case R_PPC64_DTPREL16_DS: case R_PPC64_DTPREL16_LO: case R_PPC64_DTPREL16_LO_DS: relocate(loc, rel, val); break; default: llvm_unreachable("unsupported relocation for TLS LD to LE relaxation"); } } unsigned elf::getPPCDFormOp(unsigned secondaryOp) { switch (secondaryOp) { case LBZX: return LBZ; case LHZX: return LHZ; case LWZX: return LWZ; case LDX: return LD; case STBX: return STB; case STHX: return STH; case STWX: return STW; case STDX: return STD; case ADD: return ADDI; default: return 0; } } void PPC64::relaxTlsIeToLe(uint8_t *loc, const Relocation &rel, uint64_t val) const { // The initial exec code sequence for a global `x` will look like: // Instruction Relocation Symbol // addis r9, r2, x@got@tprel@ha R_PPC64_GOT_TPREL16_HA x // ld r9, x@got@tprel@l(r9) R_PPC64_GOT_TPREL16_LO_DS x // add r9, r9, x@tls R_PPC64_TLS x // Relaxing to local exec entails converting: // addis r9, r2, x@got@tprel@ha into nop // ld r9, x@got@tprel@l(r9) into addis r9, r13, x@tprel@ha // add r9, r9, x@tls into addi r9, r9, x@tprel@l // x@tls R_PPC64_TLS is a relocation which does not compute anything, // it is 
replaced with r13 (thread pointer). // The add instruction in the initial exec sequence has multiple variations // that need to be handled. If we are building an address it will use an add // instruction, if we are accessing memory it will use any of the X-form // indexed load or store instructions. unsigned offset = (config->ekind == ELF64BEKind) ? 2 : 0; switch (rel.type) { case R_PPC64_GOT_TPREL16_HA: write32(loc - offset, 0x60000000); // nop break; case R_PPC64_GOT_TPREL16_LO_DS: case R_PPC64_GOT_TPREL16_DS: { uint32_t regNo = read32(loc - offset) & 0x03E00000; // bits 6-10 write32(loc - offset, 0x3C0D0000 | regNo); // addis RegNo, r13 relocateNoSym(loc, R_PPC64_TPREL16_HA, val); break; } case R_PPC64_TLS: { uint32_t primaryOp = getPrimaryOpCode(read32(loc)); if (primaryOp != 31) error("unrecognized instruction for IE to LE R_PPC64_TLS"); uint32_t secondaryOp = (read32(loc) & 0x000007FE) >> 1; // bits 21-30 uint32_t dFormOp = getPPCDFormOp(secondaryOp); if (dFormOp == 0) error("unrecognized instruction for IE to LE R_PPC64_TLS"); write32(loc, ((dFormOp << 26) | (read32(loc) & 0x03FFFFFF))); relocateNoSym(loc + offset, R_PPC64_TPREL16_LO, val); break; } default: llvm_unreachable("unknown relocation for IE to LE"); break; } } RelExpr PPC64::getRelExpr(RelType type, const Symbol &s, const uint8_t *loc) const { switch (type) { case R_PPC64_NONE: return R_NONE; case R_PPC64_ADDR16: case R_PPC64_ADDR16_DS: case R_PPC64_ADDR16_HA: case R_PPC64_ADDR16_HI: case R_PPC64_ADDR16_HIGHER: case R_PPC64_ADDR16_HIGHERA: case R_PPC64_ADDR16_HIGHEST: case R_PPC64_ADDR16_HIGHESTA: case R_PPC64_ADDR16_LO: case R_PPC64_ADDR16_LO_DS: case R_PPC64_ADDR32: case R_PPC64_ADDR64: return R_ABS; case R_PPC64_GOT16: case R_PPC64_GOT16_DS: case R_PPC64_GOT16_HA: case R_PPC64_GOT16_HI: case R_PPC64_GOT16_LO: case R_PPC64_GOT16_LO_DS: return R_GOT_OFF; case R_PPC64_TOC16: case R_PPC64_TOC16_DS: case R_PPC64_TOC16_HI: case R_PPC64_TOC16_LO: return R_GOTREL; case R_PPC64_GOT_PCREL34: return 
R_GOT_PC; case R_PPC64_TOC16_HA: case R_PPC64_TOC16_LO_DS: return config->tocOptimize ? R_PPC64_RELAX_TOC : R_GOTREL; case R_PPC64_TOC: return R_PPC64_TOCBASE; case R_PPC64_REL14: case R_PPC64_REL24: return R_PPC64_CALL_PLT; case R_PPC64_REL16_LO: case R_PPC64_REL16_HA: case R_PPC64_REL16_HI: case R_PPC64_REL32: case R_PPC64_REL64: case R_PPC64_PCREL34: return R_PC; case R_PPC64_GOT_TLSGD16: case R_PPC64_GOT_TLSGD16_HA: case R_PPC64_GOT_TLSGD16_HI: case R_PPC64_GOT_TLSGD16_LO: return R_TLSGD_GOT; case R_PPC64_GOT_TLSLD16: case R_PPC64_GOT_TLSLD16_HA: case R_PPC64_GOT_TLSLD16_HI: case R_PPC64_GOT_TLSLD16_LO: return R_TLSLD_GOT; case R_PPC64_GOT_TPREL16_HA: case R_PPC64_GOT_TPREL16_LO_DS: case R_PPC64_GOT_TPREL16_DS: case R_PPC64_GOT_TPREL16_HI: return R_GOT_OFF; case R_PPC64_GOT_DTPREL16_HA: case R_PPC64_GOT_DTPREL16_LO_DS: case R_PPC64_GOT_DTPREL16_DS: case R_PPC64_GOT_DTPREL16_HI: return R_TLSLD_GOT_OFF; case R_PPC64_TPREL16: case R_PPC64_TPREL16_HA: case R_PPC64_TPREL16_LO: case R_PPC64_TPREL16_HI: case R_PPC64_TPREL16_DS: case R_PPC64_TPREL16_LO_DS: case R_PPC64_TPREL16_HIGHER: case R_PPC64_TPREL16_HIGHERA: case R_PPC64_TPREL16_HIGHEST: case R_PPC64_TPREL16_HIGHESTA: return R_TLS; case R_PPC64_DTPREL16: case R_PPC64_DTPREL16_DS: case R_PPC64_DTPREL16_HA: case R_PPC64_DTPREL16_HI: case R_PPC64_DTPREL16_HIGHER: case R_PPC64_DTPREL16_HIGHERA: case R_PPC64_DTPREL16_HIGHEST: case R_PPC64_DTPREL16_HIGHESTA: case R_PPC64_DTPREL16_LO: case R_PPC64_DTPREL16_LO_DS: case R_PPC64_DTPREL64: return R_DTPREL; case R_PPC64_TLSGD: return R_TLSDESC_CALL; case R_PPC64_TLSLD: return R_TLSLD_HINT; case R_PPC64_TLS: return R_TLSIE_HINT; default: error(getErrorLocation(loc) + "unknown relocation (" + Twine(type) + ") against symbol " + toString(s)); return R_NONE; } } RelType PPC64::getDynRel(RelType type) const { if (type == R_PPC64_ADDR64 || type == R_PPC64_TOC) return R_PPC64_ADDR64; return R_PPC64_NONE; } void PPC64::writeGotHeader(uint8_t *buf) const { write64(buf, 
getPPC64TocBase()); } void PPC64::writePltHeader(uint8_t *buf) const { // The generic resolver stub goes first. write32(buf + 0, 0x7c0802a6); // mflr r0 write32(buf + 4, 0x429f0005); // bcl 20,4*cr7+so,8 <_glink+0x8> write32(buf + 8, 0x7d6802a6); // mflr r11 write32(buf + 12, 0x7c0803a6); // mtlr r0 write32(buf + 16, 0x7d8b6050); // subf r12, r11, r12 write32(buf + 20, 0x380cffcc); // subi r0,r12,52 write32(buf + 24, 0x7800f082); // srdi r0,r0,62,2 write32(buf + 28, 0xe98b002c); // ld r12,44(r11) write32(buf + 32, 0x7d6c5a14); // add r11,r12,r11 write32(buf + 36, 0xe98b0000); // ld r12,0(r11) write32(buf + 40, 0xe96b0008); // ld r11,8(r11) write32(buf + 44, 0x7d8903a6); // mtctr r12 write32(buf + 48, 0x4e800420); // bctr // The 'bcl' instruction will set the link register to the address of the // following instruction ('mflr r11'). Here we store the offset from that // instruction to the first entry in the GotPlt section. int64_t gotPltOffset = in.gotPlt->getVA() - (in.plt->getVA() + 8); write64(buf + 52, gotPltOffset); } void PPC64::writePlt(uint8_t *buf, const Symbol &sym, uint64_t /*pltEntryAddr*/) const { int32_t offset = pltHeaderSize + sym.pltIndex * pltEntrySize; // bl __glink_PLTresolve write32(buf, 0x48000000 | ((-offset) & 0x03FFFFFc)); } void PPC64::writeIplt(uint8_t *buf, const Symbol &sym, uint64_t /*pltEntryAddr*/) const { writePPC64LoadAndBranch(buf, sym.getGotPltVA() - getPPC64TocBase()); } static std::pair toAddr16Rel(RelType type, uint64_t val) { // Relocations relative to the toc-base need to be adjusted by the Toc offset. uint64_t tocBiasedVal = val - ppc64TocOffset; // Relocations relative to dtv[dtpmod] need to be adjusted by the DTP offset. uint64_t dtpBiasedVal = val - dynamicThreadPointerOffset; switch (type) { // TOC biased relocation. 
case R_PPC64_GOT16: case R_PPC64_GOT_TLSGD16: case R_PPC64_GOT_TLSLD16: case R_PPC64_TOC16: return {R_PPC64_ADDR16, tocBiasedVal}; case R_PPC64_GOT16_DS: case R_PPC64_TOC16_DS: case R_PPC64_GOT_TPREL16_DS: case R_PPC64_GOT_DTPREL16_DS: return {R_PPC64_ADDR16_DS, tocBiasedVal}; case R_PPC64_GOT16_HA: case R_PPC64_GOT_TLSGD16_HA: case R_PPC64_GOT_TLSLD16_HA: case R_PPC64_GOT_TPREL16_HA: case R_PPC64_GOT_DTPREL16_HA: case R_PPC64_TOC16_HA: return {R_PPC64_ADDR16_HA, tocBiasedVal}; case R_PPC64_GOT16_HI: case R_PPC64_GOT_TLSGD16_HI: case R_PPC64_GOT_TLSLD16_HI: case R_PPC64_GOT_TPREL16_HI: case R_PPC64_GOT_DTPREL16_HI: case R_PPC64_TOC16_HI: return {R_PPC64_ADDR16_HI, tocBiasedVal}; case R_PPC64_GOT16_LO: case R_PPC64_GOT_TLSGD16_LO: case R_PPC64_GOT_TLSLD16_LO: case R_PPC64_TOC16_LO: return {R_PPC64_ADDR16_LO, tocBiasedVal}; case R_PPC64_GOT16_LO_DS: case R_PPC64_TOC16_LO_DS: case R_PPC64_GOT_TPREL16_LO_DS: case R_PPC64_GOT_DTPREL16_LO_DS: return {R_PPC64_ADDR16_LO_DS, tocBiasedVal}; // Dynamic Thread pointer biased relocation types. 
case R_PPC64_DTPREL16: return {R_PPC64_ADDR16, dtpBiasedVal}; case R_PPC64_DTPREL16_DS: return {R_PPC64_ADDR16_DS, dtpBiasedVal}; case R_PPC64_DTPREL16_HA: return {R_PPC64_ADDR16_HA, dtpBiasedVal}; case R_PPC64_DTPREL16_HI: return {R_PPC64_ADDR16_HI, dtpBiasedVal}; case R_PPC64_DTPREL16_HIGHER: return {R_PPC64_ADDR16_HIGHER, dtpBiasedVal}; case R_PPC64_DTPREL16_HIGHERA: return {R_PPC64_ADDR16_HIGHERA, dtpBiasedVal}; case R_PPC64_DTPREL16_HIGHEST: return {R_PPC64_ADDR16_HIGHEST, dtpBiasedVal}; case R_PPC64_DTPREL16_HIGHESTA: return {R_PPC64_ADDR16_HIGHESTA, dtpBiasedVal}; case R_PPC64_DTPREL16_LO: return {R_PPC64_ADDR16_LO, dtpBiasedVal}; case R_PPC64_DTPREL16_LO_DS: return {R_PPC64_ADDR16_LO_DS, dtpBiasedVal}; case R_PPC64_DTPREL64: return {R_PPC64_ADDR64, dtpBiasedVal}; default: return {type, val}; } } static bool isTocOptType(RelType type) { switch (type) { case R_PPC64_GOT16_HA: case R_PPC64_GOT16_LO_DS: case R_PPC64_TOC16_HA: case R_PPC64_TOC16_LO_DS: case R_PPC64_TOC16_LO: return true; default: return false; } } void PPC64::relocate(uint8_t *loc, const Relocation &rel, uint64_t val) const { RelType type = rel.type; bool shouldTocOptimize = isTocOptType(type); // For dynamic thread pointer relative, toc-relative, and got-indirect // relocations, proceed in terms of the corresponding ADDR16 relocation type. std::tie(type, val) = toAddr16Rel(type, val); switch (type) { case R_PPC64_ADDR14: { checkAlignment(loc, val, 4, rel); // Preserve the AA/LK bits in the branch instruction uint8_t aalk = loc[3]; write16(loc + 2, (aalk & 3) | (val & 0xfffc)); break; } case R_PPC64_ADDR16: checkIntUInt(loc, val, 16, rel); write16(loc, val); break; case R_PPC64_ADDR32: checkIntUInt(loc, val, 32, rel); write32(loc, val); break; case R_PPC64_ADDR16_DS: case R_PPC64_TPREL16_DS: { checkInt(loc, val, 16, rel); // DQ-form instructions use bits 28-31 as part of the instruction encoding // DS-form instructions only use bits 30-31. 
uint16_t mask = isDQFormInstruction(readFromHalf16(loc)) ? 0xf : 0x3; checkAlignment(loc, lo(val), mask + 1, rel); write16(loc, (read16(loc) & mask) | lo(val)); } break; case R_PPC64_ADDR16_HA: case R_PPC64_REL16_HA: case R_PPC64_TPREL16_HA: if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) writeFromHalf16(loc, 0x60000000); else write16(loc, ha(val)); break; case R_PPC64_ADDR16_HI: case R_PPC64_REL16_HI: case R_PPC64_TPREL16_HI: write16(loc, hi(val)); break; case R_PPC64_ADDR16_HIGHER: case R_PPC64_TPREL16_HIGHER: write16(loc, higher(val)); break; case R_PPC64_ADDR16_HIGHERA: case R_PPC64_TPREL16_HIGHERA: write16(loc, highera(val)); break; case R_PPC64_ADDR16_HIGHEST: case R_PPC64_TPREL16_HIGHEST: write16(loc, highest(val)); break; case R_PPC64_ADDR16_HIGHESTA: case R_PPC64_TPREL16_HIGHESTA: write16(loc, highesta(val)); break; case R_PPC64_ADDR16_LO: case R_PPC64_REL16_LO: case R_PPC64_TPREL16_LO: // When the high-adjusted part of a toc relocation evaluates to 0, it is // changed into a nop. The lo part then needs to be updated to use the // toc-pointer register r2, as the base register. if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) { uint32_t insn = readFromHalf16(loc); if (isInstructionUpdateForm(insn)) error(getErrorLocation(loc) + "can't toc-optimize an update instruction: 0x" + utohexstr(insn)); writeFromHalf16(loc, (insn & 0xffe00000) | 0x00020000 | lo(val)); } else { write16(loc, lo(val)); } break; case R_PPC64_ADDR16_LO_DS: case R_PPC64_TPREL16_LO_DS: { // DQ-form instructions use bits 28-31 as part of the instruction encoding // DS-form instructions only use bits 30-31. uint32_t insn = readFromHalf16(loc); uint16_t mask = isDQFormInstruction(insn) ? 0xf : 0x3; checkAlignment(loc, lo(val), mask + 1, rel); if (config->tocOptimize && shouldTocOptimize && ha(val) == 0) { // When the high-adjusted part of a toc relocation evaluates to 0, it is // changed into a nop. 
The lo part then needs to be updated to use the toc // pointer register r2, as the base register. if (isInstructionUpdateForm(insn)) error(getErrorLocation(loc) + "Can't toc-optimize an update instruction: 0x" + Twine::utohexstr(insn)); insn &= 0xffe00000 | mask; writeFromHalf16(loc, insn | 0x00020000 | lo(val)); } else { write16(loc, (read16(loc) & mask) | lo(val)); } } break; case R_PPC64_TPREL16: checkInt(loc, val, 16, rel); write16(loc, val); break; case R_PPC64_REL32: checkInt(loc, val, 32, rel); write32(loc, val); break; case R_PPC64_ADDR64: case R_PPC64_REL64: case R_PPC64_TOC: write64(loc, val); break; case R_PPC64_REL14: { uint32_t mask = 0x0000FFFC; checkInt(loc, val, 16, rel); checkAlignment(loc, val, 4, rel); write32(loc, (read32(loc) & ~mask) | (val & mask)); break; } case R_PPC64_REL24: { uint32_t mask = 0x03FFFFFC; checkInt(loc, val, 26, rel); checkAlignment(loc, val, 4, rel); write32(loc, (read32(loc) & ~mask) | (val & mask)); break; } case R_PPC64_DTPREL64: write64(loc, val - dynamicThreadPointerOffset); break; case R_PPC64_PCREL34: { const uint64_t si0Mask = 0x00000003ffff0000; const uint64_t si1Mask = 0x000000000000ffff; const uint64_t fullMask = 0x0003ffff0000ffff; checkInt(loc, val, 34, rel); uint64_t instr = readPrefixedInstruction(loc) & ~fullMask; writePrefixedInstruction(loc, instr | ((val & si0Mask) << 16) | (val & si1Mask)); break; } case R_PPC64_GOT_PCREL34: { const uint64_t si0Mask = 0x00000003ffff0000; const uint64_t si1Mask = 0x000000000000ffff; const uint64_t fullMask = 0x0003ffff0000ffff; checkInt(loc, val, 34, rel); uint64_t instr = readPrefixedInstruction(loc) & ~fullMask; writePrefixedInstruction(loc, instr | ((val & si0Mask) << 16) | (val & si1Mask)); break; } default: llvm_unreachable("unknown relocation"); } } bool PPC64::needsThunk(RelExpr expr, RelType type, const InputFile *file, uint64_t branchAddr, const Symbol &s, int64_t a) const { if (type != R_PPC64_REL14 && type != R_PPC64_REL24) return false; // If a function is in 
the Plt it needs to be called with a call-stub. if (s.isInPlt()) return true; + // This check looks at the st_other bits of the callee. If the value is 1 + // then the callee clobbers the TOC and we need an R2 save stub. + if ((s.stOther >> 5) == 1) + return true; + // If a symbol is a weak undefined and we are compiling an executable // it doesn't need a range-extending thunk since it can't be called. if (s.isUndefWeak() && !config->shared) return false; // If the offset exceeds the range of the branch type then it will need // a range-extending thunk. // See the comment in getRelocTargetVA() about R_PPC64_CALL. return !inBranchRange(type, branchAddr, s.getVA(a) + getPPC64GlobalEntryToLocalEntryOffset(s.stOther)); } uint32_t PPC64::getThunkSectionSpacing() const { // See comment in Arch/ARM.cpp for a more detailed explanation of // getThunkSectionSpacing(). For PPC64 we pick the constant here based on // R_PPC64_REL24, which is used by unconditional branch instructions. // 0x2000000 = (1 << 24-1) * 4 return 0x2000000; } bool PPC64::inBranchRange(RelType type, uint64_t src, uint64_t dst) const { int64_t offset = dst - src; if (type == R_PPC64_REL14) return isInt<16>(offset); if (type == R_PPC64_REL24) return isInt<26>(offset); llvm_unreachable("unsupported relocation type used in branch"); } RelExpr PPC64::adjustRelaxExpr(RelType type, const uint8_t *data, RelExpr expr) const { if (expr == R_RELAX_TLS_GD_TO_IE) return R_RELAX_TLS_GD_TO_IE_GOT_OFF; if (expr == R_RELAX_TLS_LD_TO_LE) return R_RELAX_TLS_LD_TO_LE_ABS; return expr; } // Reference: 3.7.4.1 of the 64-bit ELF V2 abi supplement. // The general dynamic code sequence for a global `x` uses 4 instructions. 
// Instruction Relocation Symbol // addis r3, r2, x@got@tlsgd@ha R_PPC64_GOT_TLSGD16_HA x // addi r3, r3, x@got@tlsgd@l R_PPC64_GOT_TLSGD16_LO x // bl __tls_get_addr(x@tlsgd) R_PPC64_TLSGD x // R_PPC64_REL24 __tls_get_addr // nop None None // // Relaxing to initial-exec entails: // 1) Convert the addis/addi pair that builds the address of the tls_index // struct for 'x' to an addis/ld pair that loads an offset from a got-entry. // 2) Convert the call to __tls_get_addr to a nop. // 3) Convert the nop following the call to an add of the loaded offset to the // thread pointer. // Since the nop must directly follow the call, the R_PPC64_TLSGD relocation is // used as the relaxation hint for both steps 2 and 3. void PPC64::relaxTlsGdToIe(uint8_t *loc, const Relocation &rel, uint64_t val) const { switch (rel.type) { case R_PPC64_GOT_TLSGD16_HA: // This is relaxed from addis rT, r2, sym@got@tlsgd@ha to // addis rT, r2, sym@got@tprel@ha. relocateNoSym(loc, R_PPC64_GOT_TPREL16_HA, val); return; case R_PPC64_GOT_TLSGD16: case R_PPC64_GOT_TLSGD16_LO: { // Relax from addi r3, rA, sym@got@tlsgd@l to // ld r3, sym@got@tprel@l(rA) uint32_t ra = (readFromHalf16(loc) & (0x1f << 16)); writeFromHalf16(loc, 0xe8600000 | ra); relocateNoSym(loc, R_PPC64_GOT_TPREL16_LO_DS, val); return; } case R_PPC64_TLSGD: write32(loc, 0x60000000); // bl __tls_get_addr(sym@tlsgd) --> nop write32(loc + 4, 0x7c636A14); // nop --> add r3, r3, r13 return; default: llvm_unreachable("unsupported relocation for TLS GD to IE relaxation"); } } // The prologue for a split-stack function is expected to look roughly // like this: // .Lglobal_entry_point: // # TOC pointer initialization. // ... // .Llocal_entry_point: // # load the __private_ss member of the threads tcbhead. // ld r0,-0x7000-64(r13) // # subtract the functions stack size from the stack pointer. 
// addis r12, r1, ha(-stack-frame size) // addi r12, r12, l(-stack-frame size) // # compare needed to actual and branch to allocate_more_stack if more // # space is needed, otherwise fallthrough to 'normal' function body. // cmpld cr7,r12,r0 // blt- cr7, .Lallocate_more_stack // // -) The allocate_more_stack block might be placed after the split-stack // prologue and the `blt-` replaced with a `bge+ .Lnormal_func_body` // instead. // -) If either the addis or addi is not needed due to the stack size being // smaller then 32K or a multiple of 64K they will be replaced with a nop, // but there will always be 2 instructions the linker can overwrite for the // adjusted stack size. // // The linkers job here is to increase the stack size used in the addis/addi // pair by split-stack-size-adjust. // addis r12, r1, ha(-stack-frame size - split-stack-adjust-size) // addi r12, r12, l(-stack-frame size - split-stack-adjust-size) bool PPC64::adjustPrologueForCrossSplitStack(uint8_t *loc, uint8_t *end, uint8_t stOther) const { // If the caller has a global entry point adjust the buffer past it. The start // of the split-stack prologue will be at the local entry point. loc += getPPC64GlobalEntryToLocalEntryOffset(stOther); // At the very least we expect to see a load of some split-stack data from the // tcb, and 2 instructions that calculate the ending stack address this // function will require. If there is not enough room for at least 3 // instructions it can't be a split-stack prologue. if (loc + 12 >= end) return false; // First instruction must be `ld r0, -0x7000-64(r13)` if (read32(loc) != 0xe80d8fc0) return false; int16_t hiImm = 0; int16_t loImm = 0; // First instruction can be either an addis if the frame size is larger then // 32K, or an addi if the size is less then 32K. 
int32_t firstInstr = read32(loc + 4); if (getPrimaryOpCode(firstInstr) == 15) { hiImm = firstInstr & 0xFFFF; } else if (getPrimaryOpCode(firstInstr) == 14) { loImm = firstInstr & 0xFFFF; } else { return false; } // Second instruction is either an addi or a nop. If the first instruction was // an addi then LoImm is set and the second instruction must be a nop. uint32_t secondInstr = read32(loc + 8); if (!loImm && getPrimaryOpCode(secondInstr) == 14) { loImm = secondInstr & 0xFFFF; } else if (secondInstr != 0x60000000) { return false; } // The register operands of the first instruction should be the stack-pointer // (r1) as the input (RA) and r12 as the output (RT). If the second // instruction is not a nop, then it should use r12 as both input and output. auto checkRegOperands = [](uint32_t instr, uint8_t expectedRT, uint8_t expectedRA) { return ((instr & 0x3E00000) >> 21 == expectedRT) && ((instr & 0x1F0000) >> 16 == expectedRA); }; if (!checkRegOperands(firstInstr, 12, 1)) return false; if (secondInstr != 0x60000000 && !checkRegOperands(secondInstr, 12, 12)) return false; int32_t stackFrameSize = (hiImm * 65536) + loImm; // Check that the adjusted size doesn't overflow what we can represent with 2 // instructions. if (stackFrameSize < config->splitStackAdjustSize + INT32_MIN) { error(getErrorLocation(loc) + "split-stack prologue adjustment overflows"); return false; } int32_t adjustedStackFrameSize = stackFrameSize - config->splitStackAdjustSize; loImm = adjustedStackFrameSize & 0xFFFF; hiImm = (adjustedStackFrameSize + 0x8000) >> 16; if (hiImm) { write32(loc + 4, 0x3D810000 | (uint16_t)hiImm); // If the low immediate is zero the second instruction will be a nop. secondInstr = loImm ? 
0x398C0000 | (uint16_t)loImm : 0x60000000; write32(loc + 8, secondInstr); } else { // addi r12, r1, imm write32(loc + 4, (0x39810000) | (uint16_t)loImm); write32(loc + 8, 0x60000000); } return true; } TargetInfo *elf::getPPC64TargetInfo() { static PPC64 target; return ⌖ } diff --git a/lld/ELF/Thunks.cpp b/lld/ELF/Thunks.cpp index 744ceaf725cf..ea74d343ebb2 100644 --- a/lld/ELF/Thunks.cpp +++ b/lld/ELF/Thunks.cpp @@ -1,979 +1,1013 @@ //===- Thunks.cpp --------------------------------------------------------===// // // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===---------------------------------------------------------------------===// // // This file contains Thunk subclasses. // // A thunk is a small piece of code written after an input section // which is used to jump between "incompatible" functions // such as MIPS PIC and non-PIC or ARM non-Thumb and Thumb functions. // // If a jump target is too far and its address doesn't fit to a // short jump instruction, we need to create a thunk too, but we // haven't supported it yet. // // i386 and x86-64 don't need thunks. 
// //===---------------------------------------------------------------------===// #include "Thunks.h" #include "Config.h" #include "InputSection.h" #include "OutputSections.h" #include "Symbols.h" #include "SyntheticSections.h" #include "Target.h" #include "lld/Common/ErrorHandler.h" #include "lld/Common/Memory.h" #include "llvm/BinaryFormat/ELF.h" #include "llvm/Support/Casting.h" #include "llvm/Support/Endian.h" #include "llvm/Support/ErrorHandling.h" #include "llvm/Support/MathExtras.h" #include #include using namespace llvm; using namespace llvm::object; using namespace llvm::ELF; using namespace lld; using namespace lld::elf; namespace { // AArch64 long range Thunks class AArch64ABSLongThunk final : public Thunk { public: AArch64ABSLongThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {} uint32_t size() override { return 16; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; class AArch64ADRPThunk final : public Thunk { public: AArch64ADRPThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {} uint32_t size() override { return 12; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; // Base class for ARM thunks. // // An ARM thunk may be either short or long. A short thunk is simply a branch // (B) instruction, and it may be used to call ARM functions when the distance // from the thunk to the target is less than 32MB. Long thunks can branch to any // virtual address and can switch between ARM and Thumb, and they are // implemented in the derived classes. This class tries to create a short thunk // if the target is in range, otherwise it creates a long thunk. class ARMThunk : public Thunk { public: ARMThunk(Symbol &dest) : Thunk(dest, 0) {} bool getMayUseShortThunk(); uint32_t size() override { return getMayUseShortThunk() ? 
4 : sizeLong(); } void writeTo(uint8_t *buf) override; bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override; // Returns the size of a long thunk. virtual uint32_t sizeLong() = 0; // Writes a long thunk to Buf. virtual void writeLong(uint8_t *buf) = 0; private: // This field tracks whether all previously considered layouts would allow // this thunk to be short. If we have ever needed a long thunk, we always // create a long thunk, even if the thunk may be short given the current // distance to the target. We do this because transitioning from long to short // can create layout oscillations in certain corner cases which would prevent // the layout from converging. bool mayUseShortThunk = true; }; // Base class for Thumb-2 thunks. // // This class is similar to ARMThunk, but it uses the Thumb-2 B.W instruction // which has a range of 16MB. class ThumbThunk : public Thunk { public: ThumbThunk(Symbol &dest) : Thunk(dest, 0) { alignment = 2; } bool getMayUseShortThunk(); uint32_t size() override { return getMayUseShortThunk() ? 4 : sizeLong(); } void writeTo(uint8_t *buf) override; bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override; // Returns the size of a long thunk. virtual uint32_t sizeLong() = 0; // Writes a long thunk to Buf. virtual void writeLong(uint8_t *buf) = 0; private: // See comment in ARMThunk above. bool mayUseShortThunk = true; }; // Specific ARM Thunk implementations. 
The naming convention is: // Source State, TargetState, Target Requirement, ABS or PI, Range class ARMV7ABSLongThunk final : public ARMThunk { public: ARMV7ABSLongThunk(Symbol &dest) : ARMThunk(dest) {} uint32_t sizeLong() override { return 12; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; class ARMV7PILongThunk final : public ARMThunk { public: ARMV7PILongThunk(Symbol &dest) : ARMThunk(dest) {} uint32_t sizeLong() override { return 16; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; class ThumbV7ABSLongThunk final : public ThumbThunk { public: ThumbV7ABSLongThunk(Symbol &dest) : ThumbThunk(dest) {} uint32_t sizeLong() override { return 10; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; class ThumbV7PILongThunk final : public ThumbThunk { public: ThumbV7PILongThunk(Symbol &dest) : ThumbThunk(dest) {} uint32_t sizeLong() override { return 12; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; // Implementations of Thunks for older Arm architectures that do not support // the movt/movw instructions. These thunks require at least Architecture v5 // as used on processors such as the Arm926ej-s. 
There are no Thumb entry // points as there is no Thumb branch instruction on these architecture that // can result in a thunk class ARMV5ABSLongThunk final : public ARMThunk { public: ARMV5ABSLongThunk(Symbol &dest) : ARMThunk(dest) {} uint32_t sizeLong() override { return 8; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override; }; class ARMV5PILongThunk final : public ARMThunk { public: ARMV5PILongThunk(Symbol &dest) : ARMThunk(dest) {} uint32_t sizeLong() override { return 16; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override; }; // Implementations of Thunks for Arm v6-M. Only Thumb instructions are permitted class ThumbV6MABSLongThunk final : public ThumbThunk { public: ThumbV6MABSLongThunk(Symbol &dest) : ThumbThunk(dest) {} uint32_t sizeLong() override { return 12; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; class ThumbV6MPILongThunk final : public ThumbThunk { public: ThumbV6MPILongThunk(Symbol &dest) : ThumbThunk(dest) {} uint32_t sizeLong() override { return 16; } void writeLong(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; // MIPS LA25 thunk class MipsThunk final : public Thunk { public: MipsThunk(Symbol &dest) : Thunk(dest, 0) {} uint32_t size() override { return 16; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; InputSection *getTargetInputSection() const override; }; // microMIPS R2-R5 LA25 thunk class MicroMipsThunk final : public Thunk { public: MicroMipsThunk(Symbol &dest) : Thunk(dest, 0) {} uint32_t size() override { return 14; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; InputSection *getTargetInputSection() const override; }; // microMIPS R6 LA25 thunk class 
MicroMipsR6Thunk final : public Thunk { public: MicroMipsR6Thunk(Symbol &dest) : Thunk(dest, 0) {} uint32_t size() override { return 12; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; InputSection *getTargetInputSection() const override; }; class PPC32PltCallStub final : public Thunk { public: // For R_PPC_PLTREL24, Thunk::addend records the addend which will be used to // decide the offsets in the call stub. PPC32PltCallStub(const InputSection &isec, const Relocation &rel, Symbol &dest) : Thunk(dest, rel.addend), file(isec.file) {} uint32_t size() override { return 16; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; bool isCompatibleWith(const InputSection &isec, const Relocation &rel) const override; private: // Records the call site of the call stub. const InputFile *file; }; class PPC32LongThunk final : public Thunk { public: PPC32LongThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {} uint32_t size() override { return config->isPic ? 32 : 16; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; // PPC64 Plt call stubs. // Any call site that needs to call through a plt entry needs a call stub in // the .text section. The call stub is responsible for: // 1) Saving the toc-pointer to the stack. // 2) Loading the target functions address from the procedure linkage table into // r12 for use by the target functions global entry point, and into the count // register. // 3) Transferring control to the target function through an indirect branch. 
class PPC64PltCallStub final : public Thunk { public: PPC64PltCallStub(Symbol &dest) : Thunk(dest, 0) {} uint32_t size() override { return 20; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; }; +// PPC64 R2 Save Stub +// When the caller requires a valid R2 TOC pointer but the callee does not +// require a TOC pointer and the callee cannot guarantee that it doesn't +// clobber R2 then we need to save R2. This stub: +// 1) Saves the TOC pointer to the stack. +// 2) Tail calls the callee. +class PPC64R2SaveStub final : public Thunk { +public: + PPC64R2SaveStub(Symbol &dest) : Thunk(dest, 0) {} + uint32_t size() override { return 8; } + void writeTo(uint8_t *buf) override; + void addSymbols(ThunkSection &isec) override; +}; + // A bl instruction uses a signed 24 bit offset, with an implicit 4 byte // alignment. This gives a possible 26 bits of 'reach'. If the call offset is // larger then that we need to emit a long-branch thunk. The target address // of the callee is stored in a table to be accessed TOC-relative. Since the // call must be local (a non-local call will have a PltCallStub instead) the // table stores the address of the callee's local entry point. For // position-independent code a corresponding relative dynamic relocation is // used. 
class PPC64LongBranchThunk : public Thunk { public: uint32_t size() override { return 16; } void writeTo(uint8_t *buf) override; void addSymbols(ThunkSection &isec) override; protected: PPC64LongBranchThunk(Symbol &dest, int64_t addend) : Thunk(dest, addend) {} }; class PPC64PILongBranchThunk final : public PPC64LongBranchThunk { public: PPC64PILongBranchThunk(Symbol &dest, int64_t addend) : PPC64LongBranchThunk(dest, addend) { assert(!dest.isPreemptible); if (Optional index = in.ppc64LongBranchTarget->addEntry(&dest, addend)) { mainPart->relaDyn->addReloc( {target->relativeRel, in.ppc64LongBranchTarget, *index * UINT64_C(8), true, &dest, addend + getPPC64GlobalEntryToLocalEntryOffset(dest.stOther)}); } } }; class PPC64PDLongBranchThunk final : public PPC64LongBranchThunk { public: PPC64PDLongBranchThunk(Symbol &dest, int64_t addend) : PPC64LongBranchThunk(dest, addend) { in.ppc64LongBranchTarget->addEntry(&dest, addend); } }; } // end anonymous namespace Defined *Thunk::addSymbol(StringRef name, uint8_t type, uint64_t value, InputSectionBase §ion) { Defined *d = addSyntheticLocal(name, type, value, /*size=*/0, section); syms.push_back(d); return d; } void Thunk::setOffset(uint64_t newOffset) { for (Defined *d : syms) d->value = d->value - offset + newOffset; offset = newOffset; } // AArch64 long range Thunks static uint64_t getAArch64ThunkDestVA(const Symbol &s, int64_t a) { uint64_t v = s.isInPlt() ? 
s.getPltVA() : s.getVA(a); return v; } void AArch64ABSLongThunk::writeTo(uint8_t *buf) { const uint8_t data[] = { 0x50, 0x00, 0x00, 0x58, // ldr x16, L0 0x00, 0x02, 0x1f, 0xd6, // br x16 0x00, 0x00, 0x00, 0x00, // L0: .xword S 0x00, 0x00, 0x00, 0x00, }; uint64_t s = getAArch64ThunkDestVA(destination, addend); memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf + 8, R_AARCH64_ABS64, s); } void AArch64ABSLongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__AArch64AbsLongThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$x", STT_NOTYPE, 0, isec); addSymbol("$d", STT_NOTYPE, 8, isec); } // This Thunk has a maximum range of 4Gb, this is sufficient for all programs // using the small code model, including pc-relative ones. At time of writing // clang and gcc do not support the large code model for position independent // code so it is safe to use this for position independent thunks without // worrying about the destination being more than 4Gb away. void AArch64ADRPThunk::writeTo(uint8_t *buf) { const uint8_t data[] = { 0x10, 0x00, 0x00, 0x90, // adrp x16, Dest R_AARCH64_ADR_PREL_PG_HI21(Dest) 0x10, 0x02, 0x00, 0x91, // add x16, x16, R_AARCH64_ADD_ABS_LO12_NC(Dest) 0x00, 0x02, 0x1f, 0xd6, // br x16 }; uint64_t s = getAArch64ThunkDestVA(destination, addend); uint64_t p = getThunkTargetSym()->getVA(); memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_AARCH64_ADR_PREL_PG_HI21, getAArch64Page(s) - getAArch64Page(p)); target->relocateNoSym(buf + 4, R_AARCH64_ADD_ABS_LO12_NC, s); } void AArch64ADRPThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__AArch64ADRPThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$x", STT_NOTYPE, 0, isec); } // ARM Target Thunks static uint64_t getARMThunkDestVA(const Symbol &s) { uint64_t v = s.isInPlt() ? 
s.getPltVA() : s.getVA(); return SignExtend64<32>(v); } // This function returns true if the target is not Thumb and is within 2^26, and // it has not previously returned false (see comment for mayUseShortThunk). bool ARMThunk::getMayUseShortThunk() { if (!mayUseShortThunk) return false; uint64_t s = getARMThunkDestVA(destination); if (s & 1) { mayUseShortThunk = false; return false; } uint64_t p = getThunkTargetSym()->getVA(); int64_t offset = s - p - 8; mayUseShortThunk = llvm::isInt<26>(offset); return mayUseShortThunk; } void ARMThunk::writeTo(uint8_t *buf) { if (!getMayUseShortThunk()) { writeLong(buf); return; } uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA(); int64_t offset = s - p - 8; const uint8_t data[] = { 0x00, 0x00, 0x00, 0xea, // b S }; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_JUMP24, offset); } bool ARMThunk::isCompatibleWith(const InputSection &isec, const Relocation &rel) const { // Thumb branch relocations can't use BLX return rel.type != R_ARM_THM_JUMP19 && rel.type != R_ARM_THM_JUMP24; } // This function returns true if the target is Thumb and is within 2^25, and // it has not previously returned false (see comment for mayUseShortThunk). 
bool ThumbThunk::getMayUseShortThunk() { if (!mayUseShortThunk) return false; uint64_t s = getARMThunkDestVA(destination); if ((s & 1) == 0) { mayUseShortThunk = false; return false; } uint64_t p = getThunkTargetSym()->getVA() & ~1; int64_t offset = s - p - 4; mayUseShortThunk = llvm::isInt<25>(offset); return mayUseShortThunk; } void ThumbThunk::writeTo(uint8_t *buf) { if (!getMayUseShortThunk()) { writeLong(buf); return; } uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA(); int64_t offset = s - p - 4; const uint8_t data[] = { 0x00, 0xf0, 0x00, 0xb0, // b.w S }; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_THM_JUMP24, offset); } bool ThumbThunk::isCompatibleWith(const InputSection &isec, const Relocation &rel) const { // ARM branch relocations can't use BLX return rel.type != R_ARM_JUMP24 && rel.type != R_ARM_PC24 && rel.type != R_ARM_PLT32; } void ARMV7ABSLongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0x00, 0xc0, 0x00, 0xe3, // movw ip,:lower16:S 0x00, 0xc0, 0x40, 0xe3, // movt ip,:upper16:S 0x1c, 0xff, 0x2f, 0xe1, // bx ip }; uint64_t s = getARMThunkDestVA(destination); memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_MOVW_ABS_NC, s); target->relocateNoSym(buf + 4, R_ARM_MOVT_ABS, s); } void ARMV7ABSLongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__ARMv7ABSLongThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$a", STT_NOTYPE, 0, isec); } void ThumbV7ABSLongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0x40, 0xf2, 0x00, 0x0c, // movw ip, :lower16:S 0xc0, 0xf2, 0x00, 0x0c, // movt ip, :upper16:S 0x60, 0x47, // bx ip }; uint64_t s = getARMThunkDestVA(destination); memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_THM_MOVW_ABS_NC, s); target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_ABS, s); } void ThumbV7ABSLongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__Thumbv7ABSLongThunk_" + destination.getName()), 
STT_FUNC, 1, isec); addSymbol("$t", STT_NOTYPE, 0, isec); } void ARMV7PILongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0xf0, 0xcf, 0x0f, 0xe3, // P: movw ip,:lower16:S - (P + (L1-P) + 8) 0x00, 0xc0, 0x40, 0xe3, // movt ip,:upper16:S - (P + (L1-P) + 8) 0x0f, 0xc0, 0x8c, 0xe0, // L1: add ip, ip, pc 0x1c, 0xff, 0x2f, 0xe1, // bx ip }; uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA(); int64_t offset = s - p - 16; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_MOVW_PREL_NC, offset); target->relocateNoSym(buf + 4, R_ARM_MOVT_PREL, offset); } void ARMV7PILongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__ARMV7PILongThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$a", STT_NOTYPE, 0, isec); } void ThumbV7PILongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0x4f, 0xf6, 0xf4, 0x7c, // P: movw ip,:lower16:S - (P + (L1-P) + 4) 0xc0, 0xf2, 0x00, 0x0c, // movt ip,:upper16:S - (P + (L1-P) + 4) 0xfc, 0x44, // L1: add ip, pc 0x60, 0x47, // bx ip }; uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA() & ~0x1; int64_t offset = s - p - 12; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf, R_ARM_THM_MOVW_PREL_NC, offset); target->relocateNoSym(buf + 4, R_ARM_THM_MOVT_PREL, offset); } void ThumbV7PILongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__ThumbV7PILongThunk_" + destination.getName()), STT_FUNC, 1, isec); addSymbol("$t", STT_NOTYPE, 0, isec); } void ARMV5ABSLongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0x04, 0xf0, 0x1f, 0xe5, // ldr pc, [pc,#-4] ; L1 0x00, 0x00, 0x00, 0x00, // L1: .word S }; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf + 4, R_ARM_ABS32, getARMThunkDestVA(destination)); } void ARMV5ABSLongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__ARMv5ABSLongThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$a", STT_NOTYPE, 0, isec); 
addSymbol("$d", STT_NOTYPE, 4, isec); } bool ARMV5ABSLongThunk::isCompatibleWith(const InputSection &isec, const Relocation &rel) const { // Thumb branch relocations can't use BLX return rel.type != R_ARM_THM_JUMP19 && rel.type != R_ARM_THM_JUMP24; } void ARMV5PILongThunk::writeLong(uint8_t *buf) { const uint8_t data[] = { 0x04, 0xc0, 0x9f, 0xe5, // P: ldr ip, [pc,#4] ; L2 0x0c, 0xc0, 0x8f, 0xe0, // L1: add ip, pc, ip 0x1c, 0xff, 0x2f, 0xe1, // bx ip 0x00, 0x00, 0x00, 0x00, // L2: .word S - (P + (L1 - P) + 8) }; uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA() & ~0x1; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12); } void ARMV5PILongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__ARMV5PILongThunk_" + destination.getName()), STT_FUNC, 0, isec); addSymbol("$a", STT_NOTYPE, 0, isec); addSymbol("$d", STT_NOTYPE, 12, isec); } bool ARMV5PILongThunk::isCompatibleWith(const InputSection &isec, const Relocation &rel) const { // Thumb branch relocations can't use BLX return rel.type != R_ARM_THM_JUMP19 && rel.type != R_ARM_THM_JUMP24; } void ThumbV6MABSLongThunk::writeLong(uint8_t *buf) { // Most Thumb instructions cannot access the high registers r8 - r15. As the // only register we can corrupt is r12 we must instead spill a low register // to the stack to use as a scratch register. We push r1 even though we // don't need to get some space to use for the return address. 
const uint8_t data[] = { 0x03, 0xb4, // push {r0, r1} ; Obtain scratch registers 0x01, 0x48, // ldr r0, [pc, #4] ; L1 0x01, 0x90, // str r0, [sp, #4] ; SP + 4 = S 0x01, 0xbd, // pop {r0, pc} ; restore r0 and branch to dest 0x00, 0x00, 0x00, 0x00 // L1: .word S }; uint64_t s = getARMThunkDestVA(destination); memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf + 8, R_ARM_ABS32, s); } void ThumbV6MABSLongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__Thumbv6MABSLongThunk_" + destination.getName()), STT_FUNC, 1, isec); addSymbol("$t", STT_NOTYPE, 0, isec); addSymbol("$d", STT_NOTYPE, 8, isec); } void ThumbV6MPILongThunk::writeLong(uint8_t *buf) { // Most Thumb instructions cannot access the high registers r8 - r15. As the // only register we can corrupt is ip (r12) we must instead spill a low // register to the stack to use as a scratch register. const uint8_t data[] = { 0x01, 0xb4, // P: push {r0} ; Obtain scratch register 0x02, 0x48, // ldr r0, [pc, #8] ; L2 0x84, 0x46, // mov ip, r0 ; high to low register 0x01, 0xbc, // pop {r0} ; restore scratch register 0xe7, 0x44, // L1: add pc, ip ; transfer control 0xc0, 0x46, // nop ; pad to 4-byte boundary 0x00, 0x00, 0x00, 0x00, // L2: .word S - (P + (L1 - P) + 4) }; uint64_t s = getARMThunkDestVA(destination); uint64_t p = getThunkTargetSym()->getVA() & ~0x1; memcpy(buf, data, sizeof(data)); target->relocateNoSym(buf + 12, R_ARM_REL32, s - p - 12); } void ThumbV6MPILongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__Thumbv6MPILongThunk_" + destination.getName()), STT_FUNC, 1, isec); addSymbol("$t", STT_NOTYPE, 0, isec); addSymbol("$d", STT_NOTYPE, 12, isec); } // Write MIPS LA25 thunk code to call PIC function from the non-PIC one. 
void MipsThunk::writeTo(uint8_t *buf) { uint64_t s = destination.getVA(); write32(buf, 0x3c190000); // lui $25, %hi(func) write32(buf + 4, 0x08000000 | (s >> 2)); // j func write32(buf + 8, 0x27390000); // addiu $25, $25, %lo(func) write32(buf + 12, 0x00000000); // nop target->relocateNoSym(buf, R_MIPS_HI16, s); target->relocateNoSym(buf + 8, R_MIPS_LO16, s); } void MipsThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__LA25Thunk_" + destination.getName()), STT_FUNC, 0, isec); } InputSection *MipsThunk::getTargetInputSection() const { auto &dr = cast(destination); return dyn_cast(dr.section); } // Write microMIPS R2-R5 LA25 thunk code // to call PIC function from the non-PIC one. void MicroMipsThunk::writeTo(uint8_t *buf) { uint64_t s = destination.getVA(); write16(buf, 0x41b9); // lui $25, %hi(func) write16(buf + 4, 0xd400); // j func write16(buf + 8, 0x3339); // addiu $25, $25, %lo(func) write16(buf + 12, 0x0c00); // nop target->relocateNoSym(buf, R_MICROMIPS_HI16, s); target->relocateNoSym(buf + 4, R_MICROMIPS_26_S1, s); target->relocateNoSym(buf + 8, R_MICROMIPS_LO16, s); } void MicroMipsThunk::addSymbols(ThunkSection &isec) { Defined *d = addSymbol( saver.save("__microLA25Thunk_" + destination.getName()), STT_FUNC, 0, isec); d->stOther |= STO_MIPS_MICROMIPS; } InputSection *MicroMipsThunk::getTargetInputSection() const { auto &dr = cast(destination); return dyn_cast(dr.section); } // Write microMIPS R6 LA25 thunk code // to call PIC function from the non-PIC one. 
void MicroMipsR6Thunk::writeTo(uint8_t *buf) { uint64_t s = destination.getVA(); uint64_t p = getThunkTargetSym()->getVA(); write16(buf, 0x1320); // lui $25, %hi(func) write16(buf + 4, 0x3339); // addiu $25, $25, %lo(func) write16(buf + 8, 0x9400); // bc func target->relocateNoSym(buf, R_MICROMIPS_HI16, s); target->relocateNoSym(buf + 4, R_MICROMIPS_LO16, s); target->relocateNoSym(buf + 8, R_MICROMIPS_PC26_S1, s - p - 12); } void MicroMipsR6Thunk::addSymbols(ThunkSection &isec) { Defined *d = addSymbol( saver.save("__microLA25Thunk_" + destination.getName()), STT_FUNC, 0, isec); d->stOther |= STO_MIPS_MICROMIPS; } InputSection *MicroMipsR6Thunk::getTargetInputSection() const { auto &dr = cast(destination); return dyn_cast(dr.section); } void elf::writePPC32PltCallStub(uint8_t *buf, uint64_t gotPltVA, const InputFile *file, int64_t addend) { if (!config->isPic) { write32(buf + 0, 0x3d600000 | (gotPltVA + 0x8000) >> 16); // lis r11,ha write32(buf + 4, 0x816b0000 | (uint16_t)gotPltVA); // lwz r11,l(r11) write32(buf + 8, 0x7d6903a6); // mtctr r11 write32(buf + 12, 0x4e800420); // bctr return; } uint32_t offset; if (addend >= 0x8000) { // The stub loads an address relative to r30 (.got2+Addend). Addend is // almost always 0x8000. The address of .got2 is different in another object // file, so a stub cannot be shared. offset = gotPltVA - (in.ppc32Got2->getParent()->getVA() + file->ppc32Got2OutSecOff + addend); } else { // The stub loads an address relative to _GLOBAL_OFFSET_TABLE_ (which is // currently the address of .got). 
offset = gotPltVA - in.got->getVA(); } uint16_t ha = (offset + 0x8000) >> 16, l = (uint16_t)offset; if (ha == 0) { write32(buf + 0, 0x817e0000 | l); // lwz r11,l(r30) write32(buf + 4, 0x7d6903a6); // mtctr r11 write32(buf + 8, 0x4e800420); // bctr write32(buf + 12, 0x60000000); // nop } else { write32(buf + 0, 0x3d7e0000 | ha); // addis r11,r30,ha write32(buf + 4, 0x816b0000 | l); // lwz r11,l(r11) write32(buf + 8, 0x7d6903a6); // mtctr r11 write32(buf + 12, 0x4e800420); // bctr } } void PPC32PltCallStub::writeTo(uint8_t *buf) { writePPC32PltCallStub(buf, destination.getGotPltVA(), file, addend); } void PPC32PltCallStub::addSymbols(ThunkSection &isec) { std::string buf; raw_string_ostream os(buf); os << format_hex_no_prefix(addend, 8); if (!config->isPic) os << ".plt_call32."; else if (addend >= 0x8000) os << ".got2.plt_pic32."; else os << ".plt_pic32."; os << destination.getName(); addSymbol(saver.save(os.str()), STT_FUNC, 0, isec); } bool PPC32PltCallStub::isCompatibleWith(const InputSection &isec, const Relocation &rel) const { return !config->isPic || (isec.file == file && rel.addend == addend); } void PPC32LongThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__LongThunk_" + destination.getName()), STT_FUNC, 0, isec); } void PPC32LongThunk::writeTo(uint8_t *buf) { auto ha = [](uint32_t v) -> uint16_t { return (v + 0x8000) >> 16; }; auto lo = [](uint32_t v) -> uint16_t { return v; }; uint32_t d = destination.getVA(addend); if (config->isPic) { uint32_t off = d - (getThunkTargetSym()->getVA() + 8); write32(buf + 0, 0x7c0802a6); // mflr r12,0 write32(buf + 4, 0x429f0005); // bcl r20,r31,.+4 write32(buf + 8, 0x7d8802a6); // mtctr r12 write32(buf + 12, 0x3d8c0000 | ha(off)); // addis r12,r12,off@ha write32(buf + 16, 0x398c0000 | lo(off)); // addi r12,r12,off@l write32(buf + 20, 0x7c0803a6); // mtlr r0 buf += 24; } else { write32(buf + 0, 0x3d800000 | ha(d)); // lis r12,d@ha write32(buf + 4, 0x398c0000 | lo(d)); // addi r12,r12,d@l buf += 8; } 
write32(buf + 0, 0x7d8903a6); // mtctr r12 write32(buf + 4, 0x4e800420); // bctr } void elf::writePPC64LoadAndBranch(uint8_t *buf, int64_t offset) { uint16_t offHa = (offset + 0x8000) >> 16; uint16_t offLo = offset & 0xffff; write32(buf + 0, 0x3d820000 | offHa); // addis r12, r2, OffHa write32(buf + 4, 0xe98c0000 | offLo); // ld r12, OffLo(r12) write32(buf + 8, 0x7d8903a6); // mtctr r12 write32(buf + 12, 0x4e800420); // bctr } void PPC64PltCallStub::writeTo(uint8_t *buf) { int64_t offset = destination.getGotPltVA() - getPPC64TocBase(); // Save the TOC pointer to the save-slot reserved in the call frame. write32(buf + 0, 0xf8410018); // std r2,24(r1) writePPC64LoadAndBranch(buf + 4, offset); } void PPC64PltCallStub::addSymbols(ThunkSection &isec) { Defined *s = addSymbol(saver.save("__plt_" + destination.getName()), STT_FUNC, 0, isec); s->needsTocRestore = true; s->file = destination.file; } +void PPC64R2SaveStub::writeTo(uint8_t *buf) { + int64_t offset = destination.getVA() - (getThunkTargetSym()->getVA() + 4); + // The branch offset needs to fit in 26 bits. 
+ if (!isInt<26>(offset)) + fatal("R2 save stub branch offset is too large: " + Twine(offset)); + write32(buf + 0, 0xf8410018); // std r2,24(r1) + write32(buf + 4, 0x48000000 | (offset & 0x03fffffc)); // b +} + +void PPC64R2SaveStub::addSymbols(ThunkSection &isec) { + Defined *s = addSymbol(saver.save("__toc_save_" + destination.getName()), + STT_FUNC, 0, isec); + s->needsTocRestore = true; +} + void PPC64LongBranchThunk::writeTo(uint8_t *buf) { int64_t offset = in.ppc64LongBranchTarget->getEntryVA(&destination, addend) - getPPC64TocBase(); writePPC64LoadAndBranch(buf, offset); } void PPC64LongBranchThunk::addSymbols(ThunkSection &isec) { addSymbol(saver.save("__long_branch_" + destination.getName()), STT_FUNC, 0, isec); } Thunk::Thunk(Symbol &d, int64_t a) : destination(d), addend(a), offset(0) {} Thunk::~Thunk() = default; static Thunk *addThunkAArch64(RelType type, Symbol &s, int64_t a) { if (type != R_AARCH64_CALL26 && type != R_AARCH64_JUMP26 && type != R_AARCH64_PLT32) fatal("unrecognized relocation type"); if (config->picThunk) return make(s, a); return make(s, a); } // Creates a thunk for Thumb-ARM interworking. // Arm Architectures v5 and v6 do not support Thumb2 technology. This means // - MOVT and MOVW instructions cannot be used // - Only Thumb relocation that can generate a Thunk is a BL, this can always // be transformed into a BLX static Thunk *addThunkPreArmv7(RelType reloc, Symbol &s) { switch (reloc) { case R_ARM_PC24: case R_ARM_PLT32: case R_ARM_JUMP24: case R_ARM_CALL: case R_ARM_THM_CALL: if (config->picThunk) return make(s); return make(s); } fatal("relocation " + toString(reloc) + " to " + toString(s) + " not supported for Armv5 or Armv6 targets"); } // Create a thunk for Thumb long branch on V6-M. // Arm Architecture v6-M only supports Thumb instructions. This means // - MOVT and MOVW instructions cannot be used. // - Only a limited number of instructions can access registers r8 and above // - No interworking support is needed (all Thumb). 
static Thunk *addThunkV6M(RelType reloc, Symbol &s) { switch (reloc) { case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: case R_ARM_THM_CALL: if (config->isPic) return make(s); return make(s); } fatal("relocation " + toString(reloc) + " to " + toString(s) + " not supported for Armv6-M targets"); } // Creates a thunk for Thumb-ARM interworking or branch range extension. static Thunk *addThunkArm(RelType reloc, Symbol &s) { // Decide which Thunk is needed based on: // Available instruction set // - An Arm Thunk can only be used if Arm state is available. // - A Thumb Thunk can only be used if Thumb state is available. // - Can only use a Thunk if it uses instructions that the Target supports. // Relocation is branch or branch and link // - Branch instructions cannot change state, can only select Thunk that // starts in the same state as the caller. // - Branch and link relocations can change state, can select Thunks from // either Arm or Thumb. // Position independent Thunks if we require position independent code. // Handle architectures that have restrictions on the instructions that they // can use in Thunks. The flags below are set by reading the BuildAttributes // of the input objects. InputFiles.cpp contains the mapping from ARM // architecture to flag. 
if (!config->armHasMovtMovw) { if (!config->armJ1J2BranchEncoding) return addThunkPreArmv7(reloc, s); return addThunkV6M(reloc, s); } switch (reloc) { case R_ARM_PC24: case R_ARM_PLT32: case R_ARM_JUMP24: case R_ARM_CALL: if (config->picThunk) return make(s); return make(s); case R_ARM_THM_JUMP19: case R_ARM_THM_JUMP24: case R_ARM_THM_CALL: if (config->picThunk) return make(s); return make(s); } fatal("unrecognized relocation type"); } static Thunk *addThunkMips(RelType type, Symbol &s) { if ((s.stOther & STO_MIPS_MICROMIPS) && isMipsR6()) return make(s); if (s.stOther & STO_MIPS_MICROMIPS) return make(s); return make(s); } static Thunk *addThunkPPC32(const InputSection &isec, const Relocation &rel, Symbol &s) { assert((rel.type == R_PPC_LOCAL24PC || rel.type == R_PPC_REL24 || rel.type == R_PPC_PLTREL24) && "unexpected relocation type for thunk"); if (s.isInPlt()) return make(isec, rel, s); return make(s, rel.addend); } static Thunk *addThunkPPC64(RelType type, Symbol &s, int64_t a) { assert((type == R_PPC64_REL14 || type == R_PPC64_REL24) && "unexpected relocation type for thunk"); if (s.isInPlt()) return make(s); + // This check looks at the st_other bits of the callee. If the value is 1 + // then the callee clobbers the TOC and we need an R2 save stub. 
+ if ((s.stOther >> 5) == 1) + return make(s); + if (config->picThunk) return make(s, a); return make(s, a); } Thunk *elf::addThunk(const InputSection &isec, Relocation &rel) { Symbol &s = *rel.sym; int64_t a = rel.addend; if (config->emachine == EM_AARCH64) return addThunkAArch64(rel.type, s, a); if (config->emachine == EM_ARM) return addThunkArm(rel.type, s); if (config->emachine == EM_MIPS) return addThunkMips(rel.type, s); if (config->emachine == EM_PPC) return addThunkPPC32(isec, rel, s); if (config->emachine == EM_PPC64) return addThunkPPC64(rel.type, s, a); llvm_unreachable("add Thunk only supported for ARM, Mips and PowerPC"); } diff --git a/lld/test/ELF/ppc64-error-toc-local-call.s b/lld/test/ELF/ppc64-error-toc-local-call.s new file mode 100644 index 000000000000..f23eba101209 --- /dev/null +++ b/lld/test/ELF/ppc64-error-toc-local-call.s @@ -0,0 +1,32 @@ +# RUN: llvm-mc -filetype=obj -triple=powerpc64le %s -o %t.o +# RUN: not ld.lld %t.o -o /dev/null 2>&1 | FileCheck %s + +# RUN: llvm-mc -filetype=obj -triple=powerpc64 %s -o %t.o +# RUN: not ld.lld %t.o -o /dev/null 2>&1 | FileCheck %s + +## This test checks that the linker produces errors when it is missing the nop +## after a local call to a callee with st_other=1. 
+ +# CHECK: (.text+0xC): call to save_callee lacks nop, can't restore toc +# CHECK: (.text+0x1C): call to save_callee lacks nop, can't restore toc + +callee: + .localentry callee, 1 + blr # 0x0 + +caller: +.Lfunc_gep1: + addis 2, 12, .TOC.-.Lfunc_gep1@ha + addi 2, 2, .TOC.-.Lfunc_gep1@l +.Lfunc_lep1: + .localentry caller, .Lfunc_lep1-.Lfunc_gep1 + bl callee # 0xC + blr + +caller_tail: +.Lfunc_gep2: + addis 2, 12, .TOC.-.Lfunc_gep2@ha + addi 2, 2, .TOC.-.Lfunc_gep2@l +.Lfunc_lep2: + .localentry caller_tail, .Lfunc_lep2-.Lfunc_gep2 + b callee # 0x1C diff --git a/lld/test/ELF/ppc64-toc-call-to-pcrel-long-jump.s b/lld/test/ELF/ppc64-toc-call-to-pcrel-long-jump.s new file mode 100644 index 000000000000..89f62c7de7ce --- /dev/null +++ b/lld/test/ELF/ppc64-toc-call-to-pcrel-long-jump.s @@ -0,0 +1,33 @@ +# REQUIRES: ppc +# RUN: echo 'SECTIONS { \ +# RUN: .text_callee 0x10010000 : { *(.text_callee) } \ +# RUN: .text_caller 0x20020000 : { *(.text_caller) } \ +# RUN: }' > %t.script + +# RUN: llvm-mc -filetype=obj -triple=powerpc64le %s -o %t.o +# RUN: not ld.lld -T %t.script %t.o -o %t 2>&1 >/dev/null | FileCheck %s + +# RUN: llvm-mc -filetype=obj -triple=powerpc64 %s -o %t.o +# RUN: not ld.lld -T %t.script %t.o -o %t 2>&1 >/dev/null | FileCheck %s + +# CHECK: error: R2 save stub branch offset is too large: -268501028 + +.section .text_callee, "ax", %progbits +callee: + .localentry callee, 1 + blr + +.section .text_caller, "ax", %progbits +caller: +.Lfunc_gep1: + addis 2, 12, .TOC.-.Lfunc_gep1@ha + addi 2, 2, .TOC.-.Lfunc_gep1@l +.Lfunc_lep1: + .localentry caller, .Lfunc_lep1-.Lfunc_gep1 + addis 30, 2, global@toc@ha + lwz 3, global@toc@l(30) + bl callee + nop + blr +global: + .long 0 diff --git a/lld/test/ELF/ppc64-toc-call-to-pcrel.s b/lld/test/ELF/ppc64-toc-call-to-pcrel.s new file mode 100644 index 000000000000..1807895a1914 --- /dev/null +++ b/lld/test/ELF/ppc64-toc-call-to-pcrel.s @@ -0,0 +1,74 @@ +# REQUIRES: ppc +# RUN: echo 'SECTIONS { \ +# RUN: .text_callee 
0x10010000 : { *(.text_callee) } \ +# RUN: .text_caller 0x10020000 : { *(.text_caller) } \ +# RUN: }' > %t.script + +# RUN: llvm-mc -filetype=obj -triple=powerpc64le %s -o %t.o +# RUN: ld.lld -T %t.script %t.o -o %t +# RUN: llvm-readelf -s %t | FileCheck %s --check-prefix=SYMBOL +# RUN: llvm-objdump -d --no-show-raw-insn --mcpu=future %t | FileCheck %s + +# RUN: llvm-mc -filetype=obj -triple=powerpc64 %s -o %t.o +# RUN: ld.lld -T %t.script %t.o -o %t +# RUN: llvm-readelf -s %t | FileCheck %s --check-prefix=SYMBOL +# RUN: llvm-objdump -d --no-show-raw-insn --mcpu=future %t | FileCheck %s + +# The point of this test is to make sure that when a function with TOC access +# a local function with st_other=1, a TOC save stub is inserted. + +# SYMBOL: Symbol table '.symtab' contains 7 entries +# SYMBOL: 10010000 0 NOTYPE LOCAL DEFAULT [] 1 callee +# SYMBOL: 10020000 0 NOTYPE LOCAL DEFAULT [] 2 caller +# SYMBOL: 10020020 0 NOTYPE LOCAL DEFAULT [] 2 caller_14 +# SYMBOL: 1002003c 8 FUNC LOCAL DEFAULT 2 __toc_save_callee + +# CHECK-LABEL: callee +# CHECK: blr + +# CHECK-LABEL: caller +# CHECK: bl 0x1002003c +# CHECK-NEXT: ld 2, 24(1) +# CHECK-NEXT: blr + +# CHECK-LABEL: caller_14 +# CHECK: bfl 0, 0x1002003c +# CHECK-NEXT: ld 2, 24(1) +# CHECK-NEXT: blr + +# CHECK-LABEL: __toc_save_callee +# CHECK-NEXT: std 2, 24(1) +# CHECK-NEXT: b 0x10010000 + + +.section .text_callee, "ax", %progbits +callee: + .localentry callee, 1 + blr + +.section .text_caller, "ax", %progbits +caller: +.Lfunc_gep1: + addis 2, 12, .TOC.-.Lfunc_gep1@ha + addi 2, 2, .TOC.-.Lfunc_gep1@l +.Lfunc_lep1: + .localentry caller, .Lfunc_lep1-.Lfunc_gep1 + addis 30, 2, global@toc@ha + lwz 3, global@toc@l(30) + bl callee + nop + blr +global: + .long 0 + +caller_14: +.Lfunc_gep2: + addis 2, 12, .TOC.-.Lfunc_gep1@ha + addi 2, 2, .TOC.-.Lfunc_gep1@l +.Lfunc_lep2: + .localentry caller_14, .Lfunc_lep2-.Lfunc_gep2 + addis 30, 2, global@toc@ha + lwz 3, global@toc@l(30) + bcl 4, 0, callee + nop + blr