Index: ELF/OutputSections.cpp
===================================================================
--- ELF/OutputSections.cpp
+++ ELF/OutputSections.cpp
@@ -47,7 +47,7 @@
 template <class ELFT>
 GotPltSection<ELFT>::GotPltSection()
     : OutputSectionBase<ELFT>(".got.plt", SHT_PROGBITS, SHF_ALLOC | SHF_WRITE) {
-  this->Header.sh_addralign = sizeof(uintX_t);
+  this->Header.sh_addralign = Target->GotPltEntrySize;
 }
 
 template <class ELFT> void GotPltSection<ELFT>::addEntry(SymbolBody &Sym) {
@@ -61,12 +61,13 @@
 
 template <class ELFT> void GotPltSection<ELFT>::finalize() {
   this->Header.sh_size =
-      (Target->GotPltHeaderEntriesNum + Entries.size()) * sizeof(uintX_t);
+      (Target->GotPltHeaderEntriesNum + Entries.size()) *
+      Target->GotPltEntrySize;
 }
 
 template <class ELFT> void GotPltSection<ELFT>::writeTo(uint8_t *Buf) {
   Target->writeGotPltHeader(Buf);
-  Buf += Target->GotPltHeaderEntriesNum * sizeof(uintX_t);
+  Buf += Target->GotPltHeaderEntriesNum * Target->GotPltEntrySize;
   for (const SymbolBody *B : Entries) {
     Target->writeGotPlt(Buf, *B);
     Buf += sizeof(uintX_t);
@@ -78,7 +79,7 @@
     : OutputSectionBase<ELFT>(".got", SHT_PROGBITS, SHF_ALLOC | SHF_WRITE) {
   if (Config->EMachine == EM_MIPS)
     this->Header.sh_flags |= SHF_MIPS_GPREL;
-  this->Header.sh_addralign = sizeof(uintX_t);
+  this->Header.sh_addralign = Target->GotEntrySize;
 }
 
 template <class ELFT>
Index: ELF/Symbols.cpp
===================================================================
--- ELF/Symbols.cpp
+++ ELF/Symbols.cpp
@@ -165,7 +165,7 @@
 }
 
 template <class ELFT> typename ELFT::uint SymbolBody::getGotOffset() const {
-  return GotIndex * sizeof(typename ELFT::uint);
+  return GotIndex * Target->GotEntrySize;
 }
 
 template <class ELFT> typename ELFT::uint SymbolBody::getGotPltVA() const {
@@ -173,7 +173,7 @@
 }
 
 template <class ELFT> typename ELFT::uint SymbolBody::getGotPltOffset() const {
-  return GotPltIndex * sizeof(typename ELFT::uint);
+  return GotPltIndex * Target->GotPltEntrySize;
 }
 
 template <class ELFT> typename ELFT::uint SymbolBody::getPltVA() const {
Index: ELF/Target.h
===================================================================
--- ELF/Target.h
+++ ELF/Target.h
@@ -83,6 +83,8 @@
   uint32_t TlsOffsetRel;
   unsigned PltEntrySize;
   unsigned PltHeaderSize;
+  unsigned GotEntrySize;
+  unsigned GotPltEntrySize;
 
   // At least on x86_64 positions 1 and 2 are used by the first plt entry
   // to support lazy loading.
Index: ELF/Target.cpp
===================================================================
--- ELF/Target.cpp
+++ ELF/Target.cpp
@@ -313,6 +313,8 @@
   TlsOffsetRel = R_386_TLS_DTPOFF32;
   PltEntrySize = 16;
   PltHeaderSize = 16;
+  GotEntrySize = 4;
+  GotPltEntrySize = 4;
   TlsGdRelaxSkip = 2;
 }
 
@@ -556,6 +558,8 @@
   TlsOffsetRel = R_X86_64_DTPOFF64;
   PltEntrySize = 16;
   PltHeaderSize = 16;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
   TlsGdRelaxSkip = 2;
 }
 
@@ -981,6 +985,8 @@
   RelativeRel = R_PPC64_RELATIVE;
   PltEntrySize = 32;
   PltHeaderSize = 0;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
 
   // We need 64K pages (at least under glibc/Linux, the loader won't
   // set different permissions on a finer granularity than that).
@@ -1145,6 +1151,8 @@
   TlsGotRel = R_AARCH64_TLS_TPREL64;
   PltEntrySize = 16;
   PltHeaderSize = 32;
+  GotEntrySize = 8;
+  GotPltEntrySize = 8;
 
   // It doesn't seem to be documented anywhere, but tls on aarch64 uses variant
   // 1 of the tls structures and the tcb size is 16.
@@ -1485,6 +1493,8 @@
   TlsOffsetRel = R_ARM_TLS_DTPOFF32;
   PltEntrySize = 16;
   PltHeaderSize = 20;
+  GotEntrySize = 4;
+  GotPltEntrySize = 4;
 }
 
 RelExpr ARMTargetInfo::getRelExpr(uint32_t Type, const SymbolBody &S) const {
@@ -1792,6 +1802,8 @@
   PageSize = 65536;
   PltEntrySize = 16;
   PltHeaderSize = 32;
+  GotEntrySize = sizeof(typename ELFT::uint);
+  GotPltEntrySize = sizeof(typename ELFT::uint);
   CopyRel = R_MIPS_COPY;
   PltRel = R_MIPS_JUMP_SLOT;
   if (ELFT::Is64Bits) {
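
The gist of the change: sizes that were previously derived from the ELF word size
(sizeof(uintX_t)) are now read from the per-target fields GotEntrySize and
GotPltEntrySize, so a target can declare .got/.got.plt slots whose width is not
tied to ELFT::uint. Below is a minimal standalone sketch of that pattern, not
lld's actual classes: MiniTargetInfo and gotPltSize are hypothetical names
invented for illustration, while the field names mirror the ones added above.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for lld's TargetInfo: each target reports the width
// of one .got / .got.plt slot instead of callers assuming sizeof(word).
struct MiniTargetInfo {
  unsigned GotEntrySize;
  unsigned GotPltEntrySize;
  unsigned GotPltHeaderEntriesNum; // reserved slots at the start of .got.plt
};

// Mirrors the arithmetic in GotPltSection<ELFT>::finalize(): header slots plus
// one slot per entry, scaled by the target-provided entry size.
uint64_t gotPltSize(const MiniTargetInfo &T, size_t NumEntries) {
  return (T.GotPltHeaderEntriesNum + NumEntries) *
         static_cast<uint64_t>(T.GotPltEntrySize);
}

int main() {
  // Illustrative values: 3 reserved header slots, 4-byte slots on a 32-bit
  // target and 8-byte slots on a 64-bit one.
  MiniTargetInfo I386{4, 4, 3};
  MiniTargetInfo X8664{8, 8, 3};
  std::printf("i386   .got.plt, 5 entries: %llu bytes\n",
              static_cast<unsigned long long>(gotPltSize(I386, 5)));
  std::printf("x86_64 .got.plt, 5 entries: %llu bytes\n",
              static_cast<unsigned long long>(gotPltSize(X8664, 5)));
}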