Index: ELF/InputSection.cpp =================================================================== --- ELF/InputSection.cpp +++ ELF/InputSection.cpp @@ -147,6 +147,10 @@ } else if (!Target->relocNeedsCopy(Type, Body) && isa>(Body)) { continue; + } else if (Body.isTLS() && Target->getTlsOptimization(Type, Body)) { + Target->relocateTlsOptimize(BufLoc, Buf, BufEnd, Type, AddrLoc, + SymVA); + continue; } Target->relocateOne(BufLoc, BufEnd, Type, AddrLoc, SymVA + getAddend(RI)); Index: ELF/Target.h =================================================================== --- ELF/Target.h +++ ELF/Target.h @@ -57,7 +57,10 @@ virtual bool relocNeedsPlt(uint32_t Type, const SymbolBody &S) const = 0; virtual void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, uint64_t SA) const = 0; - + virtual bool getTlsOptimization(unsigned Type, const SymbolBody &S) const; + virtual void relocateTlsOptimize(uint8_t *Loc, uint8_t *BufStart, + uint8_t *BufEnd, uint32_t Type, uint64_t P, + uint64_t SA) const; virtual ~TargetInfo(); protected: Index: ELF/Target.cpp =================================================================== --- ELF/Target.cpp +++ ELF/Target.cpp @@ -73,6 +73,10 @@ void relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, uint64_t SA) const override; bool isRelRelative(uint32_t Type) const override; + bool getTlsOptimization(unsigned Type, const SymbolBody &S) const override; + void relocateTlsOptimize(uint8_t *Loc, uint8_t *BufStart, uint8_t *BufEnd, + uint32_t Type, uint64_t P, + uint64_t SA) const override; }; class PPC64TargetInfo final : public TargetInfo { @@ -145,6 +149,10 @@ TargetInfo::~TargetInfo() {} +bool TargetInfo::getTlsOptimization(unsigned Type, const SymbolBody &S) const { + return false; +} + uint64_t TargetInfo::getVAStart() const { return Config->Shared ? 
0 : VAStart; } bool TargetInfo::relocNeedsCopy(uint32_t Type, const SymbolBody &S) const { @@ -159,6 +167,10 @@ void TargetInfo::writeGotHeaderEntries(uint8_t *Buf) const {} +void TargetInfo::relocateTlsOptimize(uint8_t *Loc, uint8_t *BufStart, + uint8_t *BufEnd, uint32_t Type, uint64_t P, + uint64_t SA) const {} + X86TargetInfo::X86TargetInfo() { PCRelReloc = R_386_PC32; GotReloc = R_386_GLOB_DAT; @@ -268,6 +280,8 @@ } bool X86_64TargetInfo::relocNeedsGot(uint32_t Type, const SymbolBody &S) const { + if (Type == R_X86_64_GOTTPOFF) + return !getTlsOptimization(Type, S); return Type == R_X86_64_GOTTPOFF || Type == R_X86_64_GOTPCREL || relocNeedsPlt(Type, S); } @@ -333,6 +347,51 @@ } +bool X86_64TargetInfo::getTlsOptimization(unsigned Type, + const SymbolBody &S) const { + if (Config->Shared) + return false; + return (Type == R_X86_64_GOTTPOFF && !canBePreempted(&S, true)); +} + +// As described in "Ulrich Drepper, ELF Handling For Thread-Local Storage" (5.5 +// x86-x64 linker optimizations, http://www.akkadia.org/drepper/tls.pdf), +// the R_X86_64_GOTTPOFF relocation can be optimized to R_X86_64_TPOFF32. +// It is also stated there (6.5, New x86-64 ELF definitions) that @gottpoff(%rip) must be +// used in movq or addq instructions only. +// So the instruction must be one of the following two: +// 1) movq foo@gottpoff(%rip), %reg +// 2) addq foo@gottpoff(%rip), %reg +// The first is converted to movq $foo, %reg. +// The second is converted to leaq foo(%reg), %reg or addq $foo, %reg for r12/rsp. +// Opcode info can be found at http://ref.x86asm.net/coder64.html#x48. 
+void X86_64TargetInfo::relocateTlsOptimize(uint8_t *Loc, uint8_t *BufStart, + uint8_t *BufEnd, uint32_t Type, + uint64_t P, uint64_t SA) const { + if (Loc - 3 < BufStart) + error("Tls relocation optimization fail, buffer overrun !"); + uint8_t *Prefix = &Loc[-3]; + uint8_t *Instruct = &Loc[-2]; + uint8_t *RegSlot = &Loc[-1]; + uint8_t Reg = (Loc[-1]) >> 3; + bool IsMov = *Instruct == 0x8b; + bool RspAdd = !IsMov && Reg == 4; + // The r12 and rsp registers require special handling. + // The problem is that for other registers, for example leaq 0xXXXXXXXX(%r11),%r11 + // the result is 7 bytes: 4d 8d 9b XX XX XX XX, + // but leaq 0xXXXXXXXX(%r12),%r12 is 8 bytes: 4d 8d a4 24 XX XX XX XX. + // The same is true for rsp. So we convert to addq for them, saving 1 byte that + // we don't have. + if (RspAdd) + *Instruct = 0x81; + else + *Instruct = IsMov ? 0xc7 : 0x8d; + if (*Prefix == 0x4c) + *Prefix = (IsMov || RspAdd) ? 0x49 : 0x4d; + *RegSlot = (IsMov || RspAdd) ? (0xc0 | Reg) : (0x80 | Reg | (Reg << 3)); + relocateOne(Loc, BufEnd, R_X86_64_TPOFF32, P, SA); +} + void X86_64TargetInfo::relocateOne(uint8_t *Loc, uint8_t *BufEnd, uint32_t Type, uint64_t P, uint64_t SA) const { switch (Type) { Index: test/elf2/tls-opt.s =================================================================== --- test/elf2/tls-opt.s +++ test/elf2/tls-opt.s @@ -0,0 +1,55 @@ +// RUN: llvm-mc -filetype=obj -triple=x86_64-unknown-linux %s -o %t.o +// RUN: ld.lld2 -e main %t.o -o %t1 +// RUN: llvm-readobj -r %t1 | FileCheck --check-prefix=NORELOC %s +// RUN: llvm-objdump -d %t1 | FileCheck --check-prefix=DISASM %s + +// NORELOC: Relocations [ +// NORELOC-NEXT: ] + +// DISASM: Disassembly of section .text: +// DISASM-NEXT: main: +// DISASM-NEXT: 11000: 48 c7 c0 f8 ff ff ff movq $-8, %rax +// DISASM-NEXT: 11007: 49 c7 c7 f8 ff ff ff movq $-8, %r15 +// DISASM-NEXT: 1100e: 48 8d 80 f8 ff ff ff leaq -8(%rax), %rax +// DISASM-NEXT: 11015: 4d 8d bf f8 ff ff ff leaq -8(%r15), %r15 +// DISASM-NEXT: 1101c: 48 81 c4 f8 ff
ff ff addq $-8, %rsp +// DISASM-NEXT: 11023: 49 81 c4 f8 ff ff ff addq $-8, %r12 +// DISASM-NEXT: 1102a: 48 c7 c0 fc ff ff ff movq $-4, %rax +// DISASM-NEXT: 11031: 49 c7 c7 fc ff ff ff movq $-4, %r15 +// DISASM-NEXT: 11038: 48 8d 80 fc ff ff ff leaq -4(%rax), %rax +// DISASM-NEXT: 1103f: 4d 8d bf fc ff ff ff leaq -4(%r15), %r15 +// DISASM-NEXT: 11046: 48 81 c4 fc ff ff ff addq $-4, %rsp +// DISASM-NEXT: 1104d: 49 81 c4 fc ff ff ff addq $-4, %r12 + +.type tls0,@object +.section .tbss,"awT",@nobits +.globl tls0 +.align 4 +tls0: + .long 0 + .size tls0, 4 + +.type tls1,@object +.globl tls1 +.align 4 +tls1: + .long 0 + .size tls1, 4 + +.text + .globl main + .align 16, 0x90 + .type main,@function +main: + movq tls0@GOTTPOFF(%rip), %rax + movq tls0@GOTTPOFF(%rip), %r15 + addq tls0@GOTTPOFF(%rip), %rax + addq tls0@GOTTPOFF(%rip), %r15 + addq tls0@GOTTPOFF(%rip), %rsp + addq tls0@GOTTPOFF(%rip), %r12 + movq tls1@GOTTPOFF(%rip), %rax + movq tls1@GOTTPOFF(%rip), %r15 + addq tls1@GOTTPOFF(%rip), %rax + addq tls1@GOTTPOFF(%rip), %r15 + addq tls1@GOTTPOFF(%rip), %rsp + addq tls1@GOTTPOFF(%rip), %r12