Index: include/llvm/MC/MCInstrAnalysis.h
===================================================================
--- include/llvm/MC/MCInstrAnalysis.h
+++ include/llvm/MC/MCInstrAnalysis.h
@@ -23,6 +23,7 @@
 namespace llvm {
 
 class MCRegisterInfo;
+class Triple;
 
 class MCInstrAnalysis {
 protected:
@@ -105,6 +106,13 @@
   virtual bool
   evaluateBranch(const MCInst &Inst, uint64_t Addr, uint64_t Size,
                  uint64_t &Target) const;
+
+  /// Returns (PLT virtual address, GOT virtual address) pairs for PLT entries.
+  virtual std::vector<std::pair<uint64_t, uint64_t>>
+  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+                 uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
+    return {};
+  }
 };
 
 } // end namespace llvm
Index: include/llvm/Object/ELFObjectFile.h
===================================================================
--- include/llvm/Object/ELFObjectFile.h
+++ include/llvm/Object/ELFObjectFile.h
@@ -86,6 +86,8 @@
   void setARMSubArch(Triple &TheTriple) const override;
 
   virtual uint16_t getEType() const = 0;
+
+  std::vector<std::pair<DataRefImpl, uint64_t>> getPltAddresses() const;
 };
 
 class ELFSectionRef : public SectionRef {
Index: lib/Object/ELFObjectFile.cpp
===================================================================
--- lib/Object/ELFObjectFile.cpp
+++ lib/Object/ELFObjectFile.cpp
@@ -14,6 +14,7 @@
 #include "llvm/Object/ELFObjectFile.h"
 #include "llvm/ADT/Triple.h"
 #include "llvm/BinaryFormat/ELF.h"
+#include "llvm/MC/MCInstrAnalysis.h"
 #include "llvm/MC/SubtargetFeature.h"
 #include "llvm/Object/ELF.h"
 #include "llvm/Object/ELFTypes.h"
@@ -23,6 +24,7 @@
 #include "llvm/Support/Endian.h"
 #include "llvm/Support/ErrorHandling.h"
 #include "llvm/Support/MathExtras.h"
+#include "llvm/Support/TargetRegistry.h"
 #include <algorithm>
 #include <cstddef>
 #include <cstdint>
@@ -327,3 +329,72 @@
 
   TheTriple.setArchName(Triple);
 }
+
+std::vector<std::pair<DataRefImpl, uint64_t>>
+ELFObjectFileBase::getPltAddresses() const {
+  std::string Err;
+  const auto Triple = makeTriple();
+  const auto *T = TargetRegistry::lookupTarget(Triple.str(), Err);
+  if (!T)
+    return {};
+  uint64_t ExpectedType = 0;
+  switch (Triple.getArch()) {
+  case Triple::x86:
+    ExpectedType = ELF::R_386_JMP_SLOT;
+    break;
+  case Triple::x86_64:
+    ExpectedType = ELF::R_X86_64_JUMP_SLOT;
+    break;
+  case Triple::aarch64:
+    ExpectedType = ELF::R_AARCH64_JUMP_SLOT;
+    break;
+  default:
+    return {};
+  }
+  // Hold the instruction analysis (and the MCInstrInfo it depends on) in
+  // unique_ptrs so they are released on every return path.
+  std::unique_ptr<const MCInstrInfo> MII(T->createMCInstrInfo());
+  std::unique_ptr<const MCInstrAnalysis> MIA(
+      T->createMCInstrAnalysis(MII.get()));
+  if (!MIA)
+    return {};
+  // Locate the .plt, dynamic relocation and .got.plt sections; any of them
+  // may be absent, in which case there is nothing to do.
+  const SectionRef *Plt = nullptr, *RelaPlt = nullptr, *GotPlt = nullptr;
+  for (const SectionRef &Section : sections()) {
+    StringRef Name;
+    if (Section.getName(Name))
+      continue;
+    if (Name == ".plt")
+      Plt = &Section;
+    else if (Name == ".rela.plt" || Name == ".rel.plt")
+      RelaPlt = &Section;
+    else if (Name == ".got.plt")
+      GotPlt = &Section;
+  }
+  if (!Plt || !RelaPlt || !GotPlt)
+    return {};
+  StringRef PltContents;
+  if (Plt->getContents(PltContents))
+    return {};
+  ArrayRef<uint8_t> PltBytes((const uint8_t *)PltContents.data(),
+                             Plt->getSize());
+  auto PltEntries = MIA->findPltEntries(Plt->getAddress(), PltBytes,
+                                        GotPlt->getAddress(), Triple);
+  // Build a map from GOT entry virtual address to PLT entry virtual address.
+  DenseMap<uint64_t, uint64_t> GotToPlt;
+  for (const auto &Entry : PltEntries)
+    GotToPlt.insert(std::make_pair(Entry.second, Entry.first));
+  // Find the relocations in the dynamic relocation table that point to
+  // locations in the GOT for which we know the corresponding PLT entry.
+  std::vector<std::pair<DataRefImpl, uint64_t>> Result;
+  for (const auto &Relocation : RelaPlt->relocations()) {
+    if (Relocation.getType() != ExpectedType)
+      continue;
+    auto PltEntryIter = GotToPlt.find(Relocation.getOffset());
+    if (PltEntryIter != GotToPlt.end())
+      Result.push_back(std::make_pair(
+          Relocation.getSymbol()->getRawDataRefImpl(), PltEntryIter->second));
+  }
+  return Result;
+}
Index: lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64MCTargetDesc.cpp
@@ -153,6 +153,36 @@
     }
     return false;
   }
+
+  std::vector<std::pair<uint64_t, uint64_t>>
+  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+                 uint64_t GotPltSectionVA,
+                 const Triple &TargetTriple) const override {
+    // Do a lightweight parsing of PLT entries.
+    std::vector<std::pair<uint64_t, uint64_t>> Result;
+    for (uint64_t Byte = 0, End = PltContents.size(); Byte + 7 < End;
+         Byte += 4) {
+      uint32_t Insn = (PltContents[Byte + 3] << 24) |
+                      (PltContents[Byte + 2] << 16) |
+                      (PltContents[Byte + 1] << 8) | (PltContents[Byte] << 0);
+      uint64_t Imm = 0;
+      // Check for adrp.
+      if (Insn >> 31 == 1 && ((Insn >> 24) & 0x1f) == 0x10) {
+        Imm = (((PltSectionVA + Byte) >> 12) << 12) +
+              (((Insn >> 29) & 3) << 12) + (((Insn >> 5) & 0x3ffff) << 14);
+        uint32_t Insn2 =
+            (PltContents[Byte + 7] << 24) | (PltContents[Byte + 6] << 16) |
+            (PltContents[Byte + 5] << 8) | (PltContents[Byte + 4] << 0);
+        // Check for ldr.
+        if (Insn2 >> 22 == 0x3e5) {
+          Imm += ((Insn2 >> 10) & 0xfff) << 3;
+          Result.push_back(std::make_pair(PltSectionVA + Byte, Imm));
+          Byte += 4;
+        }
+      }
+    }
+    return Result;
+  }
 };
 
 } // end anonymous namespace
Index: lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
===================================================================
--- lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
+++ lib/Target/X86/MCTargetDesc/X86MCTargetDesc.cpp
@@ -311,6 +311,10 @@
                            const MCInst &Inst) const override;
   bool
   clearsSuperRegisters(const MCRegisterInfo &MRI, const MCInst &Inst,
                        APInt &Mask) const override;
+  std::vector<std::pair<uint64_t, uint64_t>>
+  findPltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+                 uint64_t GotSectionVA,
+                 const Triple &TargetTriple) const override;
 };
 
@@ -437,6 +440,65 @@
   return Mask.getBoolValue();
 }
+
+static std::vector<std::pair<uint64_t, uint64_t>>
+findX86PltEntries(uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+                  uint64_t GotPltSectionVA) {
+  // Do a lightweight parsing of PLT entries.
+  std::vector<std::pair<uint64_t, uint64_t>> Result;
+  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
+    // Recognize a jmp followed by a push.
+    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0xa3 &&
+        PltContents[Byte + 6] == 0x68) {
+      // The jmp instruction at the beginning of each PLT entry jumps to the
+      // address of the base of the .got.plt section plus the immediate.
+      uint32_t Imm =
+          (PltContents[Byte + 2] << 0) | (PltContents[Byte + 3] << 8) |
+          (PltContents[Byte + 4] << 16) | (PltContents[Byte + 5] << 24);
+      Result.push_back(
+          std::make_pair(PltSectionVA + Byte, GotPltSectionVA + Imm));
+      Byte += 6;
+    } else
+      Byte++;
+  }
+  return Result;
+}
+
+static std::vector<std::pair<uint64_t, uint64_t>>
+findX86_64PltEntries(uint64_t PltSectionVA,
+                     ArrayRef<uint8_t> PltContents) {
+  // Do a lightweight parsing of PLT entries.
+  std::vector<std::pair<uint64_t, uint64_t>> Result;
+  for (uint64_t Byte = 0, End = PltContents.size(); Byte + 6 < End;) {
+    // Recognize a jmp followed by a push.
+    if (PltContents[Byte] == 0xff && PltContents[Byte + 1] == 0x25 &&
+        PltContents[Byte + 6] == 0x68) {
+      // The jmp instruction at the beginning of each PLT entry jumps to the
+      // address of the next instruction plus the immediate.
+      uint32_t Imm =
+          (PltContents[Byte + 2] << 0) | (PltContents[Byte + 3] << 8) |
+          (PltContents[Byte + 4] << 16) | (PltContents[Byte + 5] << 24);
+      Result.push_back(
+          std::make_pair(PltSectionVA + Byte, PltSectionVA + Byte + 6 + Imm));
+      Byte += 6;
+    } else
+      Byte++;
+  }
+  return Result;
+}
+
+std::vector<std::pair<uint64_t, uint64_t>> X86MCInstrAnalysis::findPltEntries(
+    uint64_t PltSectionVA, ArrayRef<uint8_t> PltContents,
+    uint64_t GotPltSectionVA, const Triple &TargetTriple) const {
+  switch (TargetTriple.getArch()) {
+  case Triple::x86:
+    return findX86PltEntries(PltSectionVA, PltContents, GotPltSectionVA);
+  case Triple::x86_64:
+    return findX86_64PltEntries(PltSectionVA, PltContents);
+  default:
+    return {};
+  }
+}
 
 } // end of namespace X86_MC
 
 } // end of namespace llvm