diff --git a/compiler-rt/test/hwasan/lit.cfg.py b/compiler-rt/test/hwasan/lit.cfg.py
--- a/compiler-rt/test/hwasan/lit.cfg.py
+++ b/compiler-rt/test/hwasan/lit.cfg.py
@@ -18,13 +18,6 @@ else:
   config.available_features.add('pointer-tagging')
 if config.target_arch == 'x86_64':
-  # By default the assembler uses R_X86_64_REX_GOTPCRELX relocations, which can
-  # be relaxed to direct references. When tagged globals are enabled, these
-  # references fail to link since they have more than a 32-bit offset from RIP.
-  # As a workaround, we disable the relaxation.
-  # TODO: Implement a way to disable for the affected relocations only.
-  clang_hwasan_common_cflags += ["-Wa,-mrelax-relocations=no"]
-
   # The callback instrumentation used on x86_64 has a 1/64 chance of choosing a
   # stack tag of 0. This causes stack tests to become flaky, so we force tags
   # to be generated via calls to __hwasan_generate_tag, which never returns 0.
diff --git a/llvm/include/llvm/MC/MCExpr.h b/llvm/include/llvm/MC/MCExpr.h
--- a/llvm/include/llvm/MC/MCExpr.h
+++ b/llvm/include/llvm/MC/MCExpr.h
@@ -200,6 +200,7 @@
     VK_GOTREL,
     VK_PCREL,
     VK_GOTPCREL,
+    VK_GOTPCREL_NORELAX,
     VK_GOTTPOFF,
     VK_INDNTPOFF,
     VK_NTPOFF,
diff --git a/llvm/lib/MC/ELFObjectWriter.cpp b/llvm/lib/MC/ELFObjectWriter.cpp
--- a/llvm/lib/MC/ELFObjectWriter.cpp
+++ b/llvm/lib/MC/ELFObjectWriter.cpp
@@ -1311,6 +1311,7 @@
   case MCSymbolRefExpr::VK_GOT:
   case MCSymbolRefExpr::VK_PLT:
   case MCSymbolRefExpr::VK_GOTPCREL:
+  case MCSymbolRefExpr::VK_GOTPCREL_NORELAX:
   case MCSymbolRefExpr::VK_PPC_GOT_LO:
   case MCSymbolRefExpr::VK_PPC_GOT_HI:
   case MCSymbolRefExpr::VK_PPC_GOT_HA:
diff --git a/llvm/lib/MC/MCExpr.cpp b/llvm/lib/MC/MCExpr.cpp
--- a/llvm/lib/MC/MCExpr.cpp
+++ b/llvm/lib/MC/MCExpr.cpp
@@ -229,6 +229,7 @@
   case VK_GOTOFF: return "GOTOFF";
   case VK_GOTREL: return "GOTREL";
   case VK_PCREL: return "PCREL";
+  case VK_GOTPCREL_NORELAX: // TODO: do something special
   case VK_GOTPCREL: return "GOTPCREL";
   case VK_GOTTPOFF: return "GOTTPOFF";
   case VK_INDNTPOFF: return "INDNTPOFF";
@@ -393,7 +394,7 @@
     .Case("gotoff", VK_GOTOFF)
     .Case("gotrel", VK_GOTREL)
     .Case("pcrel", VK_PCREL)
-    .Case("gotpcrel", VK_GOTPCREL)
+    .Case("gotpcrel", VK_GOTPCREL) // TODO: do something here?
    .Case("gottpoff", VK_GOTTPOFF)
     .Case("indntpoff", VK_INDNTPOFF)
     .Case("ntpoff", VK_NTPOFF)
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
--- a/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86BaseInfo.h
@@ -441,6 +441,14 @@
     /// SYMBOL_LABEL @GOTPCREL
     MO_GOTPCREL,
 
+    /// MO_GOTPCREL_NORELAX - Same as MO_GOTPCREL except that R_X86_64_GOTPCREL
+    /// relocations are guaranteed to be emitted by the integrated assembler
+    /// instead of the relaxable R_X86_64[_REX]_GOTPCRELX relocations.
+    ///
+    /// See the X86-64 ELF ABI supplement for more details.
+    /// SYMBOL_LABEL @GOTPCREL
+    MO_GOTPCREL_NORELAX,
+
     /// MO_PLT - On a symbol operand this indicates that the immediate is
     /// offset to the PLT entry of symbol name from the current code location.
     ///
diff --git a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
--- a/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
+++ b/llvm/lib/Target/X86/MCTargetDesc/X86ELFObjectWriter.cpp
@@ -218,6 +218,9 @@
       return ELF::R_X86_64_REX_GOTPCRELX;
     }
     llvm_unreachable("unexpected relocation type!");
+  case MCSymbolRefExpr::VK_GOTPCREL_NORELAX:
+    checkIs32(Ctx, Loc, Type);
+    return ELF::R_X86_64_GOTPCREL;
   case MCSymbolRefExpr::VK_X86_PLTOFF:
     checkIs64(Ctx, Loc, Type);
     return ELF::R_X86_64_PLTOFF64;
diff --git a/llvm/lib/Target/X86/X86AsmPrinter.cpp b/llvm/lib/Target/X86/X86AsmPrinter.cpp
--- a/llvm/lib/Target/X86/X86AsmPrinter.cpp
+++ b/llvm/lib/Target/X86/X86AsmPrinter.cpp
@@ -189,6 +189,7 @@
   case X86II::MO_DTPOFF:    O << "@DTPOFF";    break;
   case X86II::MO_NTPOFF:    O << "@NTPOFF";    break;
   case X86II::MO_GOTNTPOFF: O << "@GOTNTPOFF"; break;
+  case X86II::MO_GOTPCREL_NORELAX: // TODO: Can we emit GOTPCRELX?
   case X86II::MO_GOTPCREL:  O << "@GOTPCREL";  break;
   case X86II::MO_GOT:       O << "@GOT";       break;
   case X86II::MO_GOTOFF:    O << "@GOTOFF";    break;
diff --git a/llvm/lib/Target/X86/X86FastISel.cpp b/llvm/lib/Target/X86/X86FastISel.cpp
--- a/llvm/lib/Target/X86/X86FastISel.cpp
+++ b/llvm/lib/Target/X86/X86FastISel.cpp
@@ -789,7 +789,8 @@
         RC = &X86::GR32RegClass;
       }
 
-      if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL)
+      if (Subtarget->isPICStyleRIPRel() || GVFlags == X86II::MO_GOTPCREL ||
+          GVFlags == X86II::MO_GOTPCREL_NORELAX)
         StubAM.Base.Reg = X86::RIP;
 
       LoadReg = createResultReg(RC);
@@ -3486,6 +3487,7 @@
   // NonLazyBind calls or dllimport calls.
   bool NeedLoad = OpFlags == X86II::MO_DLLIMPORT ||
                   OpFlags == X86II::MO_GOTPCREL ||
+                  OpFlags == X86II::MO_GOTPCREL_NORELAX ||
                   OpFlags == X86II::MO_COFFSTUB;
   unsigned CallOpc = NeedLoad ? (Is64Bit ? X86::CALL64m : X86::CALL32m)
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -19619,7 +19619,7 @@
     return X86ISD::WrapperRIP;
 
   // GOTPCREL references must always use RIP.
-  if (OpFlags == X86II::MO_GOTPCREL)
+  if (OpFlags == X86II::MO_GOTPCREL || OpFlags == X86II::MO_GOTPCREL_NORELAX)
     return X86ISD::WrapperRIP;
 
   return X86ISD::Wrapper;
diff --git a/llvm/lib/Target/X86/X86InstrInfo.h b/llvm/lib/Target/X86/X86InstrInfo.h
--- a/llvm/lib/Target/X86/X86InstrInfo.h
+++ b/llvm/lib/Target/X86/X86InstrInfo.h
@@ -73,6 +73,7 @@
   switch (TargetFlag) {
   case X86II::MO_DLLIMPORT:                // dllimport stub.
   case X86II::MO_GOTPCREL:                 // rip-relative GOT reference.
+  case X86II::MO_GOTPCREL_NORELAX:
   case X86II::MO_GOT:                      // normal GOT reference.
   case X86II::MO_DARWIN_NONLAZY_PIC_BASE:  // Normal $non_lazy_ptr ref.
   case X86II::MO_DARWIN_NONLAZY:           // Normal $non_lazy_ptr ref.
diff --git a/llvm/lib/Target/X86/X86InstrInfo.cpp b/llvm/lib/Target/X86/X86InstrInfo.cpp
--- a/llvm/lib/Target/X86/X86InstrInfo.cpp
+++ b/llvm/lib/Target/X86/X86InstrInfo.cpp
@@ -8866,6 +8866,7 @@
       {MO_GOT, "x86-got"},
       {MO_GOTOFF, "x86-gotoff"},
       {MO_GOTPCREL, "x86-gotpcrel"},
+      {MO_GOTPCREL_NORELAX, "x86-gotpcrel-norelax"},
       {MO_PLT, "x86-plt"},
       {MO_TLSGD, "x86-tlsgd"},
       {MO_TLSLD, "x86-tlsld"},
diff --git a/llvm/lib/Target/X86/X86MCInstLower.cpp b/llvm/lib/Target/X86/X86MCInstLower.cpp
--- a/llvm/lib/Target/X86/X86MCInstLower.cpp
+++ b/llvm/lib/Target/X86/X86MCInstLower.cpp
@@ -277,6 +277,9 @@
   case X86II::MO_GOTPCREL:
     RefKind = MCSymbolRefExpr::VK_GOTPCREL;
     break;
+  case X86II::MO_GOTPCREL_NORELAX:
+    RefKind = MCSymbolRefExpr::VK_GOTPCREL_NORELAX;
+    break;
   case X86II::MO_GOT:
     RefKind = MCSymbolRefExpr::VK_GOT;
     break;
diff --git a/llvm/lib/Target/X86/X86Subtarget.cpp b/llvm/lib/Target/X86/X86Subtarget.cpp
--- a/llvm/lib/Target/X86/X86Subtarget.cpp
+++ b/llvm/lib/Target/X86/X86Subtarget.cpp
@@ -72,7 +72,7 @@
   // errors, so we go through the GOT instead.
   if (AllowTaggedGlobals && TM.getCodeModel() == CodeModel::Small && GV &&
       !isa<Function>(GV))
-    return X86II::MO_GOTPCREL;
+    return X86II::MO_GOTPCREL_NORELAX;
 
   // If we're not PIC, it's not very interesting.
   if (!isPositionIndependent())
@@ -167,6 +167,11 @@
     // reference for them.
     if (TM.getCodeModel() == CodeModel::Large)
       return isTargetELF() ? X86II::MO_GOT : X86II::MO_NO_FLAG;
+    // Tagged globals have non-zero upper bits, which makes direct references
+    // require a 64-bit immediate. So we can't let the linker relax the
+    // relocation to a 32-bit RIP-relative direct reference.
+    if (AllowTaggedGlobals && GV && !isa<Function>(GV))
+      return X86II::MO_GOTPCREL_NORELAX;
 
     return X86II::MO_GOTPCREL;
   }
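
Not part of the patch, just an illustrative sketch: the default, relaxable relocation that the removed
-Wa,-mrelax-relocations=no workaround (and now MO_GOTPCREL_NORELAX) is meant to avoid can be observed
with the stock MC tools. The symbol name "foo" and the output file name are placeholders.

  # A GOT-relative load assembled by a default-configured integrated assembler
  # gets the relaxable R_X86_64_REX_GOTPCRELX relocation, which a linker may
  # turn into a direct RIP-relative reference -- exactly what fails for tagged
  # globals that are more than a 32-bit offset away from RIP.
  echo 'movq foo@GOTPCREL(%rip), %rax' | llvm-mc -triple=x86_64 -filetype=obj -o got.o
  llvm-readobj -r got.o
  # With this change, compiler-generated references to tagged globals should
  # instead carry the plain, non-relaxable R_X86_64_GOTPCREL relocation.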