Index: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -41,6 +41,8 @@
     return AArch64::NumTargetFixupKinds;
   }
 
+  Optional<MCFixupKind> getFixupKind(StringRef Name) const override;
+
   const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override {
     const static MCFixupKindInfo Infos[AArch64::NumTargetFixupKinds] = {
         // This table *must* be in the order that the fixup_* kinds are defined
@@ -103,6 +105,7 @@
   default:
     llvm_unreachable("Unknown fixup kind!");
 
+  case FK_NONE:
   case AArch64::fixup_aarch64_tlsdesc_call:
     return 0;
 
@@ -304,6 +307,7 @@
     if (Value & 0x3)
       Ctx.reportError(Fixup.getLoc(), "fixup not sufficiently aligned");
     return (Value >> 2) & 0x3ffffff;
+  case FK_NONE:
   case FK_Data_1:
   case FK_Data_2:
   case FK_Data_4:
@@ -314,6 +318,12 @@
   }
 }
 
+Optional<MCFixupKind> AArch64AsmBackend::getFixupKind(StringRef Name) const {
+  if (TheTriple.isOSBinFormatELF() && Name == "R_AARCH64_NONE")
+    return FK_NONE;
+  return MCAsmBackend::getFixupKind(Name);
+}
+
 /// getFixupKindContainereSizeInBytes - The number of bytes of the
 /// container involved in big endian or 0 if the item is little endian
 unsigned AArch64AsmBackend::getFixupKindContainereSizeInBytes(unsigned Kind) const {
@@ -445,6 +455,10 @@
 bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
                                               const MCFixup &Fixup,
                                               const MCValue &Target) {
+  unsigned Kind = Fixup.getKind();
+  if (Kind == FK_NONE)
+    return true;
+
   // The ADRP instruction adds some multiple of 0x1000 to the current PC &
   // ~0xfff. This means that the required offset to reach a symbol can vary by
   // up to one step depending on where the ADRP is in memory. For example:
@@ -457,14 +471,14 @@
   // same page as the ADRP and the instruction should encode 0x0. Assuming the
   // section isn't 0x1000-aligned, we therefore need to delegate this decision
   // to the linker -- a relocation!
-  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_pcrel_adrp_imm21)
+  if (Kind == AArch64::fixup_aarch64_pcrel_adrp_imm21)
     return true;
 
   AArch64MCExpr::VariantKind RefKind =
       static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
   AArch64MCExpr::VariantKind SymLoc = AArch64MCExpr::getSymbolLoc(RefKind);
   // LDR GOT relocations need a relocation
-  if ((uint32_t)Fixup.getKind() == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
+  if (Kind == AArch64::fixup_aarch64_ldr_pcrel_imm19 &&
       SymLoc == AArch64MCExpr::VK_GOT)
     return true;
   return false;
Index: llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
===================================================================
--- llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
+++ llvm/trunk/lib/Target/AArch64/MCTargetDesc/AArch64ELFObjectWriter.cpp
@@ -185,6 +185,8 @@
     if (IsILP32 && isNonILP32reloc(Fixup, RefKind, Ctx))
       return ELF::R_AARCH64_NONE;
     switch ((unsigned)Fixup.getKind()) {
+    case FK_NONE:
+      return ELF::R_AARCH64_NONE;
     case FK_Data_1:
       Ctx.reportError(Fixup.getLoc(), "1-byte data relocations not supported");
       return ELF::R_AARCH64_NONE;
Index: llvm/trunk/test/MC/AArch64/reloc-directive.s
===================================================================
--- llvm/trunk/test/MC/AArch64/reloc-directive.s
+++ llvm/trunk/test/MC/AArch64/reloc-directive.s
@@ -0,0 +1,25 @@
+# RUN: llvm-mc -triple=aarch64-linux-musl %s | FileCheck --check-prefix=PRINT %s
+
+# RUN: llvm-mc -filetype=obj -triple=aarch64-linux-musl %s | llvm-readobj -r | FileCheck %s
+
+# PRINT: .reloc 8, R_AARCH64_NONE, .data
+# PRINT: .reloc 4, R_AARCH64_NONE, foo+4
+# PRINT: .reloc 0, R_AARCH64_NONE, 8
+.text
+  ret
+  nop
+  nop
+  .reloc 8, R_AARCH64_NONE, .data
+  .reloc 4, R_AARCH64_NONE, foo+4
+  .reloc 0, R_AARCH64_NONE, 8
+
+.data
+.globl foo
+foo:
+  .word 0
+  .word 0
+  .word 0
+
+# CHECK:      0x8 R_AARCH64_NONE .data 0x0
+# CHECK-NEXT: 0x4 R_AARCH64_NONE foo 0x4
+# CHECK-NEXT: 0x0 R_AARCH64_NONE - 0x8