Index: lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp =================================================================== --- lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp +++ lib/ExecutionEngine/RuntimeDyld/RuntimeDyldELF.cpp @@ -28,9 +28,37 @@ using namespace llvm; using namespace llvm::object; +using namespace llvm::support::endian; #define DEBUG_TYPE "dyld" +static void or32le(void *P, int32_t V) { write32le(P, read32le(P) | V); } + +static void or32AArch64Imm(void *L, uint64_t Imm) { + or32le(L, (Imm & 0xFFF) << 10); +} + +template <typename T> static void write(bool isBE, void *P, T V) { + if (isBE) + write<T, support::big>(P, V); + else + write<T, support::little>(P, V); +} + +static void write32AArch64Addr(void *L, uint64_t Imm) { + uint32_t ImmLo = (Imm & 0x3) << 29; + uint32_t ImmHi = (Imm & 0x1FFFFC) << 3; + uint64_t Mask = (0x3 << 29) | (0x1FFFFC << 3); + write32le(L, (read32le(L) & ~Mask) | ImmLo | ImmHi); +} + +// Return the bits [Start, End] from Val shifted Start bits. +// For instance, getBits(0xF0, 4, 8) returns 0xF. 
+static uint64_t getBits(uint64_t Val, int Start, int End) { + uint64_t Mask = ((uint64_t)1 << (End + 1 - Start)) - 1; + return (Val >> Start) & Mask; +} + namespace { template <class ELFT> class DyldELFObject : public ELFObjectFile<ELFT> { @@ -339,25 +367,14 @@ default: llvm_unreachable("Relocation type not implemented yet!"); break; - case ELF::R_AARCH64_ABS64: { - uint64_t *TargetPtr = - reinterpret_cast<uint64_t *>(Section.getAddressWithOffset(Offset)); - if (isBE) - support::ubig64_t::ref{TargetPtr} = Value + Addend; - else - support::ulittle64_t::ref{TargetPtr} = Value + Addend; + case ELF::R_AARCH64_ABS64: + write(isBE, TargetPtr, Value + Addend); break; - } case ELF::R_AARCH64_PREL32: { uint64_t Result = Value + Addend - FinalAddress; assert(static_cast<int64_t>(Result) >= INT32_MIN && static_cast<int64_t>(Result) <= UINT32_MAX); - if (isBE) - support::ubig32_t::ref{TargetPtr} = - static_cast<uint32_t>(Result & 0xffffffffU); - else - support::ulittle32_t::ref{TargetPtr} = - static_cast<uint32_t>(Result & 0xffffffffU); + write(isBE, TargetPtr, static_cast<uint32_t>(Result & 0xffffffffU)); break; } case ELF::R_AARCH64_CALL26: // fallthrough @@ -365,122 +382,45 @@ // Operation: S+A-P. Set Call or B immediate value to bits fff_fffc of the // calculation. uint64_t BranchImm = Value + Addend - FinalAddress; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // "Check that -2^27 <= result < 2^27". assert(isInt<28>(BranchImm)); - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. - TargetValue &= 0xfc000000U; - // Immediate goes in bits 25:0 of B and BL. - TargetValue |= static_cast<uint32_t>(BranchImm & 0xffffffcU) >> 2; - support::ulittle32_t::ref{TargetPtr} = TargetValue; - break; - } - case ELF::R_AARCH64_MOVW_UABS_G3: { - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. 
- TargetValue &= 0xffe0001fU; - // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - TargetValue |= ((Result & 0xffff000000000000ULL) >> (48 - 5)); - // Shift must be "lsl #48", in bits 22:21 - assert((TargetValue >> 21 & 0x3) == 3 && "invalid shift for relocation"); - support::ulittle32_t::ref{TargetPtr} = TargetValue; - break; - } - case ELF::R_AARCH64_MOVW_UABS_G2_NC: { - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. - TargetValue &= 0xffe0001fU; - // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - TargetValue |= ((Result & 0xffff00000000ULL) >> (32 - 5)); - // Shift must be "lsl #32", in bits 22:21 - assert((TargetValue >> 21 & 0x3) == 2 && "invalid shift for relocation"); - support::ulittle32_t::ref{TargetPtr} = TargetValue; - break; - } - case ELF::R_AARCH64_MOVW_UABS_G1_NC: { - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. - TargetValue &= 0xffe0001fU; - // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - TargetValue |= ((Result & 0xffff0000U) >> (16 - 5)); - // Shift must be "lsl #16", in bits 22:2 - assert((TargetValue >> 21 & 0x3) == 1 && "invalid shift for relocation"); - support::ulittle32_t::ref{TargetPtr} = TargetValue; - break; - } - case ELF::R_AARCH64_MOVW_UABS_G0_NC: { - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. 
- TargetValue &= 0xffe0001fU; - // Immediate goes in bits 20:5 of MOVZ/MOVK instruction - TargetValue |= ((Result & 0xffffU) << 5); - // Shift must be "lsl #0", in bits 22:21. - assert((TargetValue >> 21 & 0x3) == 0 && "invalid shift for relocation"); - support::ulittle32_t::ref{TargetPtr} = TargetValue; + // "Check that -2^27 <= result < 2^27". + or32le(TargetPtr, (BranchImm & 0x0FFFFFFC) >> 2); break; } + case ELF::R_AARCH64_MOVW_UABS_G3: + or32le(TargetPtr, ((Value + Addend) & 0xFFFF000000000000) >> 43); + break; + case ELF::R_AARCH64_MOVW_UABS_G2_NC: + or32le(TargetPtr, ((Value + Addend) & 0xFFFF00000000) >> 27); + break; + case ELF::R_AARCH64_MOVW_UABS_G1_NC: + or32le(TargetPtr, ((Value + Addend) & 0xFFFF0000) >> 11); + break; + case ELF::R_AARCH64_MOVW_UABS_G0_NC: + or32le(TargetPtr, ((Value + Addend) & 0xFFFF) << 5); + break; case ELF::R_AARCH64_ADR_PREL_PG_HI21: { // Operation: Page(S+A) - Page(P) uint64_t Result = ((Value + Addend) & ~0xfffULL) - (FinalAddress & ~0xfffULL); - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; // Check that -2^32 <= X < 2^32 assert(isInt<33>(Result) && "overflow check failed for relocation"); - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. - TargetValue &= 0x9f00001fU; - // Immediate goes in bits 30:29 + 5:23 of ADRP instruction, taken - // from bits 32:12 of X. - TargetValue |= ((Result & 0x3000U) << (29 - 12)); - TargetValue |= ((Result & 0x1ffffc000ULL) >> (14 - 5)); - support::ulittle32_t::ref{TargetPtr} = TargetValue; + write32AArch64Addr(TargetPtr, Result >> 12); break; } - case ELF::R_AARCH64_LDST32_ABS_LO12_NC: { + case ELF::R_AARCH64_ADD_ABS_LO12_NC: // Operation: S + A - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. 
- TargetValue &= 0xffc003ffU; - // Immediate goes in bits 21:10 of LD/ST instruction, taken - // from bits 11:2 of X - TargetValue |= ((Result & 0xffc) << (10 - 2)); - support::ulittle32_t::ref{TargetPtr} = TargetValue; + or32AArch64Imm(TargetPtr, Value + Addend); break; - } - case ELF::R_AARCH64_LDST64_ABS_LO12_NC: { + case ELF::R_AARCH64_LDST32_ABS_LO12_NC: // Operation: S + A - uint64_t Result = Value + Addend; - uint32_t TargetValue = support::ulittle32_t::ref{TargetPtr}; - - // AArch64 code is emitted with .rela relocations. The data already in any - // bits affected by the relocation on entry is garbage. - TargetValue &= 0xffc003ffU; - // Immediate goes in bits 21:10 of LD/ST instruction, taken - // from bits 11:3 of X - TargetValue |= ((Result & 0xff8) << (10 - 3)); - support::ulittle32_t::ref{TargetPtr} = TargetValue; + or32AArch64Imm(TargetPtr, getBits(Value + Addend, 2, 11)); break; - } + case ELF::R_AARCH64_LDST64_ABS_LO12_NC: + // Operation: S + A + or32AArch64Imm(TargetPtr, getBits(Value + Addend, 3, 11)); } } Index: test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s =================================================================== --- test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s +++ test/ExecutionEngine/RuntimeDyld/AArch64/ELF_ARM64_relocations.s @@ -14,6 +14,9 @@ movk x0, #:abs_g1_nc:f # R_AARCH64_MOVW_UABS_G0_NC movk x0, #:abs_g0_nc:f +a: +# R_AARCH64_ADD_ABS_LO12_NC + add x0, x0, :lo12:f ret .Lfunc_end0: .size g, .Lfunc_end0-g @@ -31,3 +34,7 @@ # rtdyld-check: *{4}(g + 8) = 0xf2b13560 # rtdyld-check: *{4}(g + 12) = 0xf299bde0 # rtdyld-check: *{8}k = f + +## f & 0xFFF = 0xdef (bits 11:0 of f) +## 0xdef << 10 = 0x37bc00 +# rtdyld-check: *{4}(a) = 0x9137bc00