diff --git a/llvm/docs/Extensions.rst b/llvm/docs/Extensions.rst
--- a/llvm/docs/Extensions.rst
+++ b/llvm/docs/Extensions.rst
@@ -495,7 +495,7 @@
 Relocations
 ^^^^^^^^^^^
 
-``@ABS8`` can be applied to symbols which appear as immediate operands to
+**@ABS8** can be applied to symbols which appear as immediate operands to
 instructions that have an 8-bit immediate form for that operand. It causes
 the assembler to use the 8-bit form and an 8-bit relocation (e.g. ``R_386_8``
 or ``R_X86_64_8``) for the symbol.
@@ -511,6 +511,11 @@
 opposed to ``cmpq $foo, %rdi`` which takes a 32-bit immediate operand. This
 is also not the same as ``cmpb $foo, %dil``, which is an 8-bit comparison.
 
+
+**@GOTPCREL_NORELAX** can be used in place of ``@GOTPCREL`` to guarantee that
+the assembler emits an ``R_X86_64_GOTPCREL`` relocation instead of a relaxable
+``R_X86_64[_REX]_GOTPCRELX`` relocation.
+
 Windows on ARM
 --------------
diff --git a/llvm/include/llvm/MC/MCExpr.h b/llvm/include/llvm/MC/MCExpr.h
--- a/llvm/include/llvm/MC/MCExpr.h
+++ b/llvm/include/llvm/MC/MCExpr.h
@@ -200,6 +200,7 @@
     VK_GOTREL,
     VK_PCREL,
     VK_GOTPCREL,
+    VK_GOTPCREL_NORELAX,
     VK_GOTTPOFF,
     VK_INDNTPOFF,
     VK_NTPOFF,
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -1311,6 +1311,7 @@
   case MCSymbolRefExpr::VK_GOT:
   case MCSymbolRefExpr::VK_PLT:
   case MCSymbolRefExpr::VK_GOTPCREL:
+  case MCSymbolRefExpr::VK_GOTPCREL_NORELAX:
   case MCSymbolRefExpr::VK_PPC_GOT_LO:
   case MCSymbolRefExpr::VK_PPC_GOT_HI:
   case MCSymbolRefExpr::VK_PPC_GOT_HA:
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -230,6 +230,7 @@
   case VK_GOTREL: return "GOTREL";
   case VK_PCREL: return "PCREL";
   case VK_GOTPCREL: return "GOTPCREL";
+  case VK_GOTPCREL_NORELAX: return "GOTPCREL_NORELAX";
   case VK_GOTTPOFF: return "GOTTPOFF";
   case VK_INDNTPOFF: return "INDNTPOFF";
   case VK_NTPOFF: return "NTPOFF";
@@ -394,6 +395,7 @@
       .Case("gotrel", VK_GOTREL)
       .Case("pcrel", VK_PCREL)
       .Case("gotpcrel", VK_GOTPCREL)
+      .Case("gotpcrel_norelax", VK_GOTPCREL_NORELAX)
       .Case("gottpoff", VK_GOTTPOFF)
       .Case("indntpoff", VK_INDNTPOFF)
      .Case("ntpoff", VK_NTPOFF)
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -441,6 +441,11 @@
     ///    SYMBOL_LABEL @GOTPCREL
     MO_GOTPCREL,
 
+    /// MO_GOTPCREL_NORELAX - Same as MO_GOTPCREL except that R_X86_64_GOTPCREL
+    /// relocations are guaranteed to be emitted by the integrated assembler
+    /// instead of the relaxable R_X86_64[_REX]_GOTPCRELX relocations.
+    MO_GOTPCREL_NORELAX,
+
     /// MO_PLT - On a symbol operand this indicates that the immediate is
     /// offset to the PLT entry of symbol name from the current code location.
     ///
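
A usage sketch of the specifier documented in Extensions.rst above (illustration only, not part of the patch; "foo" is a placeholder symbol). The two spellings differ only in which relocation the integrated assembler may emit: the NORELAX form is parsed to MCSymbolRefExpr::VK_GOTPCREL_NORELAX and mapped by the X86 ELF writer below to R_X86_64_GOTPCREL.

  # With relocation relaxation enabled (the usual default), this may be
  # emitted as the relaxable R_X86_64_REX_GOTPCRELX, which a linker is
  # later free to rewrite into a direct lea.
  movq foo@GOTPCREL(%rip), %rax

  # Always R_X86_64_GOTPCREL: the load from foo's GOT slot is preserved.
  movq foo@GOTPCREL_NORELAX(%rip), %rax
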
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -218,6 +218,9 @@
       return ELF::R_X86_64_REX_GOTPCRELX;
     }
     llvm_unreachable("unexpected relocation type!");
+  case MCSymbolRefExpr::VK_GOTPCREL_NORELAX:
+    checkIs32(Ctx, Loc, Type);
+    return ELF::R_X86_64_GOTPCREL;
   case MCSymbolRefExpr::VK_X86_PLTOFF:
     checkIs64(Ctx, Loc, Type);
     return ELF::R_X86_64_PLTOFF64;
diff --git a/llvm/lib/Target/X86/X86.td b/llvm/lib/Target/X86/X86.td
--- a/llvm/lib/Target/X86/X86.td
+++ b/llvm/lib/Target/X86/X86.td
@@ -378,8 +378,7 @@
     : SubtargetFeature<
           "tagged-globals", "AllowTaggedGlobals", "true",
           "Use an instruction sequence for taking the address of a global "
-          "that allows a memory tag in the upper address bits. Currently "
-          "also requires -mrelax-relocations=no to work properly.">;
+          "that allows a memory tag in the upper address bits.">;
 
 //===----------------------------------------------------------------------===//
 // X86 Subtarget Tuning features
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -190,6 +190,7 @@
   case X86II::MO_NTPOFF: O << "@NTPOFF"; break;
   case X86II::MO_GOTNTPOFF: O << "@GOTNTPOFF"; break;
   case X86II::MO_GOTPCREL: O << "@GOTPCREL"; break;
+  case X86II::MO_GOTPCREL_NORELAX: O << "@GOTPCREL_NORELAX"; break;
   case X86II::MO_GOT: O << "@GOT"; break;
   case X86II::MO_GOTOFF: O << "@GOTOFF"; break;
   case X86II::MO_PLT: O << "@PLT"; break;
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -789,7 +789,8 @@
       RC = &X86::GR32RegClass;
     }
 
-    if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL)
+    if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL ||
+        GVFlags == X86II::MO_GOTPCREL_NORELAX)
       StubAM.Base.Reg = X86::RIP;
 
     LoadReg = createResultReg(RC);
@@ -3486,6 +3487,7 @@
   // NonLazyBind calls or dllimport calls.
   bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
                   OpFlags == X86II::MO_GOTPCREL ||
+                  OpFlags == X86II::MO_GOTPCREL_NORELAX ||
                   OpFlags == X86II::MO_COFFSTUB;
   unsigned CallOpc = NeedLoad ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19751,7 +19751,7 @@
     return X86ISD::WrapperRIP;
 
   // GOTPCREL references must always use RIP.
-  if (OpFlags == X86II::MO_GOTPCREL)
+  if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
     return X86ISD::WrapperRIP;
 
   return X86ISD::Wrapper;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -75,6 +75,7 @@
   switch (TargetFlag) {
   case X86II::MO_DLLIMPORT: // dllimport stub.
   case X86II::MO_GOTPCREL: // rip-relative GOT reference.
+  case X86II::MO_GOTPCREL_NORELAX: // rip-relative GOT reference.
   case X86II::MO_GOT: // normal GOT reference.
   case X86II::MO_DARWIN_NONLAZY_PIC_BASE: // Normal $non_lazy_ptr ref.
   case X86II::MO_DARWIN_NONLAZY: // Normal $non_lazy_ptr ref.
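
For context on the X86.td change above: the old feature description asked users to pass -mrelax-relocations=no because a relaxable GOT relocation lets the linker turn the GOT load into a direct address computation. A rough sketch of that rewrite, as the x86-64 psABI permits for GOTPCRELX relocations (linker behaviour, not code in this patch; "global" is a placeholder symbol):

  movq global@GOTPCREL(%rip), %rax    # R_X86_64_REX_GOTPCRELX
  # ...which a relaxing linker may rewrite in place to:
  #   leaq global(%rip), %rax
  # The lea form only has a signed 32-bit PC-relative displacement, so it
  # cannot produce an address with a memory tag in the upper bits. Emitting
  # R_X86_64_GOTPCREL instead (the NORELAX path) forbids the rewrite, and
  # the full 64-bit, possibly tagged, address is loaded from the GOT entry
  # at run time.
  movq global@GOTPCREL_NORELAX(%rip), %rax
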
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -9003,6 +9003,7 @@
       {MO_GOT, "x86-got"},
       {MO_GOTOFF, "x86-gotoff"},
       {MO_GOTPCREL, "x86-gotpcrel"},
+      {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
       {MO_PLT, "x86-plt"},
       {MO_TLSGD, "x86-tlsgd"},
       {MO_TLSLD, "x86-tlsld"},
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -277,6 +277,9 @@
   case X86II::MO_GOTPCREL:
     RefKind = MCSymbolRefExpr::VK_GOTPCREL;
     break;
+  case X86II::MO_GOTPCREL_NORELAX:
+    RefKind = MCSymbolRefExpr::VK_GOTPCREL_NORELAX;
+    break;
   case X86II::MO_GOT:
     RefKind = MCSymbolRefExpr::VK_GOT;
     break;
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -72,7 +72,7 @@
   // errors, so we go through the GOT instead.
   if (AllowTaggedGlobals && TM.getCodeModel() == CodeModel::Small && GV &&
       !isa<Function>(GV))
-    return X86II::MO_GOTPCREL;
+    return X86II::MO_GOTPCREL_NORELAX;
 
   // If we're not PIC, it's not very interesting.
   if (!isPositionIndependent())
@@ -167,6 +167,11 @@
     // reference for them.
     if (TM.getCodeModel() == CodeModel::Large)
       return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
+    // Tagged globals have non-zero upper bits, which makes direct references
+    // require a 64-bit immediate. So we can't let the linker relax the
+    // relocation to a 32-bit RIP-relative direct reference.
+    if (AllowTaggedGlobals && GV && !isa<Function>(GV))
+      return X86II::MO_GOTPCREL_NORELAX;
     return X86II::MO_GOTPCREL;
   }
diff --git a/llvm/test/CodeGen/X86/tagged-globals-pic.ll b/llvm/test/CodeGen/X86/tagged-globals-pic.ll
--- a/llvm/test/CodeGen/X86/tagged-globals-pic.ll
+++ b/llvm/test/CodeGen/X86/tagged-globals-pic.ll
@@ -1,4 +1,5 @@
 ; RUN: llc --relocation-model=pic < %s | FileCheck %s
+; RUN: llc --relocation-model=pic --relax-elf-relocations --filetype=obj -o - < %s | llvm-objdump -d -r - | FileCheck %s --check-prefix=OBJ
 
 target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
 target triple = "x86_64-unknown-linux-gnu"
@@ -8,28 +9,40 @@
 define i32* @global_addr() #0 {
   ; CHECK-LABEL: global_addr:
-  ; CHECK: movq global@GOTPCREL(%rip), %rax
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), %rax
   ; CHECK: retq
 
+  ; OBJ-LABEL: <global_addr>:
+  ; OBJ: movq (%rip),
+  ; OBJ-NEXT: R_X86_64_GOTPCREL global
+
   ret i32* @global
 }
 
 define i32 @global_load() #0 {
   ; CHECK-LABEL: global_load:
-  ; CHECK: movq global@GOTPCREL(%rip), [[REG:%r[0-9a-z]+]]
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), [[REG:%r[0-9a-z]+]]
   ; CHECK: movl ([[REG]]), %eax
   ; CHECK: retq
 
+  ; OBJ-LABEL: <global_load>:
+  ; OBJ: movq (%rip),
+  ; OBJ-NEXT: R_X86_64_GOTPCREL global
+
   %load = load i32, i32* @global
   ret i32 %load
 }
 
 define void @global_store() #0 {
   ; CHECK-LABEL: global_store:
-  ; CHECK: movq global@GOTPCREL(%rip), [[REG:%r[0-9a-z]+]]
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), [[REG:%r[0-9a-z]+]]
   ; CHECK: movl $0, ([[REG]])
   ; CHECK: retq
 
+  ; OBJ-LABEL: <global_store>:
+  ; OBJ: movq (%rip),
+  ; OBJ-NEXT: R_X86_64_GOTPCREL global
+
   store i32 0, i32* @global
   ret void
 }
@@ -39,6 +52,10 @@
   ; CHECK: movq func@GOTPCREL(%rip), %rax
   ; CHECK: retq
 
+  ; OBJ-LABEL: <func_addr>:
+  ; OBJ: movq (%rip),
+  ; OBJ-NEXT: R_X86_64_REX_GOTPCRELX func
+
   ret void ()* @func
 }
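
The split visible in the test above follows from the X86Subtarget.cpp logic: data globals may carry a tag in their upper address bits, so they are forced through the non-relaxable GOT form, while functions are excluded by the !isa<Function>(GV) check and keep the relaxable reference. A condensed sketch of the resulting code (symbol names mirror the test; illustration only):

  # Data global: keep the GOT indirection; the relocation stays
  # R_X86_64_GOTPCREL so the linker cannot relax it away.
  movq global@GOTPCREL_NORELAX(%rip), %rax

  # Function address: never tagged, so the relaxable form is still used
  # (R_X86_64_REX_GOTPCRELX, which the linker may turn into a lea).
  movq func@GOTPCREL(%rip), %rax
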
diff --git a/llvm/test/CodeGen/X86/tagged-globals-static.ll b/llvm/test/CodeGen/X86/tagged-globals-static.ll
--- a/llvm/test/CodeGen/X86/tagged-globals-static.ll
+++ b/llvm/test/CodeGen/X86/tagged-globals-static.ll
@@ -8,7 +8,7 @@
 define i32* @global_addr() #0 {
   ; CHECK-LABEL: global_addr:
-  ; CHECK: movq global@GOTPCREL(%rip), %rax
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), %rax
   ; CHECK: retq
 
   ret i32* @global
@@ -16,7 +16,7 @@
 define i32 @global_load() #0 {
   ; CHECK-LABEL: global_load:
-  ; CHECK: movq global@GOTPCREL(%rip), [[REG:%r[0-9a-z]+]]
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), [[REG:%r[0-9a-z]+]]
   ; CHECK: movl ([[REG]]), %eax
   ; CHECK: retq
@@ -26,7 +26,7 @@
 define void @global_store() #0 {
   ; CHECK-LABEL: global_store:
-  ; CHECK: movq global@GOTPCREL(%rip), [[REG:%r[0-9a-z]+]]
+  ; CHECK: movq global@GOTPCREL_NORELAX(%rip), [[REG:%r[0-9a-z]+]]
   ; CHECK: movl $0, ([[REG]])
   ; CHECK: retq
diff --git a/llvm/test/MC/X86/gotpcrel_norelax.s b/llvm/test/MC/X86/gotpcrel_norelax.s
new file mode 100644
--- /dev/null
+++ b/llvm/test/MC/X86/gotpcrel_norelax.s
@@ -0,0 +1,73 @@
+# RUN: llvm-mc -filetype=obj -triple=x86_64 %s -o %t.o
+# RUN: llvm-readobj -r %t.o | FileCheck %s
+
+# CHECK: Relocations [
+# CHECK-NEXT: Section ({{.*}}) .rela.text {
+# CHECK-NEXT: R_X86_64_GOTPCREL mov
+# CHECK-NEXT: R_X86_64_GOTPCREL test
+# CHECK-NEXT: R_X86_64_GOTPCREL adc
+# CHECK-NEXT: R_X86_64_GOTPCREL add
+# CHECK-NEXT: R_X86_64_GOTPCREL and
+# CHECK-NEXT: R_X86_64_GOTPCREL cmp
+# CHECK-NEXT: R_X86_64_GOTPCREL or
+# CHECK-NEXT: R_X86_64_GOTPCREL sbb
+# CHECK-NEXT: R_X86_64_GOTPCREL sub
+# CHECK-NEXT: R_X86_64_GOTPCREL xor
+# CHECK-NEXT: R_X86_64_GOTPCREL call
+# CHECK-NEXT: R_X86_64_GOTPCREL jmp
+# CHECK-NEXT: R_X86_64_GOTPCREL mov
+# CHECK-NEXT: R_X86_64_GOTPCREL test
+# CHECK-NEXT: R_X86_64_GOTPCREL adc
+# CHECK-NEXT: R_X86_64_GOTPCREL add
+# CHECK-NEXT: R_X86_64_GOTPCREL and
+# CHECK-NEXT: R_X86_64_GOTPCREL cmp
+# CHECK-NEXT: R_X86_64_GOTPCREL or
+# CHECK-NEXT: R_X86_64_GOTPCREL sbb
+# CHECK-NEXT: R_X86_64_GOTPCREL sub
+# CHECK-NEXT: R_X86_64_GOTPCREL xor
+# CHECK-NEXT: R_X86_64_GOTPCREL mov
+# CHECK-NEXT: R_X86_64_GOTPCREL test
+# CHECK-NEXT: R_X86_64_GOTPCREL adc
+# CHECK-NEXT: R_X86_64_GOTPCREL add
+# CHECK-NEXT: R_X86_64_GOTPCREL and
+# CHECK-NEXT: R_X86_64_GOTPCREL cmp
+# CHECK-NEXT: R_X86_64_GOTPCREL or
+# CHECK-NEXT: R_X86_64_GOTPCREL sbb
+# CHECK-NEXT: R_X86_64_GOTPCREL sub
+# CHECK-NEXT: R_X86_64_GOTPCREL xor
+# CHECK-NEXT: }
+
+movl mov@GOTPCREL_NORELAX(%rip), %eax
+test %eax, test@GOTPCREL_NORELAX(%rip)
+adc adc@GOTPCREL_NORELAX(%rip), %eax
+add add@GOTPCREL_NORELAX(%rip), %eax
+and and@GOTPCREL_NORELAX(%rip), %eax
+cmp cmp@GOTPCREL_NORELAX(%rip), %eax
+or or@GOTPCREL_NORELAX(%rip), %eax
+sbb sbb@GOTPCREL_NORELAX(%rip), %eax
+sub sub@GOTPCREL_NORELAX(%rip), %eax
+xor xor@GOTPCREL_NORELAX(%rip), %eax
+call *call@GOTPCREL_NORELAX(%rip)
+jmp *jmp@GOTPCREL_NORELAX(%rip)
+
+movl mov@GOTPCREL_NORELAX(%rip), %r8d
+test %r8d, test@GOTPCREL_NORELAX(%rip)
+adc adc@GOTPCREL_NORELAX(%rip), %r8d
+add add@GOTPCREL_NORELAX(%rip), %r8d
+and and@GOTPCREL_NORELAX(%rip), %r8d
+cmp cmp@GOTPCREL_NORELAX(%rip), %r8d
+or or@GOTPCREL_NORELAX(%rip), %r8d
+sbb sbb@GOTPCREL_NORELAX(%rip), %r8d
+sub sub@GOTPCREL_NORELAX(%rip), %r8d
+xor xor@GOTPCREL_NORELAX(%rip), %r8d
+
+movq mov@GOTPCREL_NORELAX(%rip), %rax
+test %rax, test@GOTPCREL_NORELAX(%rip)
+adc adc@GOTPCREL_NORELAX(%rip), %rax
+add add@GOTPCREL_NORELAX(%rip), %rax
+and and@GOTPCREL_NORELAX(%rip), %rax
+cmp cmp@GOTPCREL_NORELAX(%rip), %rax
+or or@GOTPCREL_NORELAX(%rip), %rax
+sbb sbb@GOTPCREL_NORELAX(%rip), %rax
+sub sub@GOTPCREL_NORELAX(%rip), %rax
+xor xor@GOTPCREL_NORELAX(%rip), %rax