Index: include/llvm/MC/MCAsmBackend.h
===================================================================
--- include/llvm/MC/MCAsmBackend.h
+++ include/llvm/MC/MCAsmBackend.h
@@ -10,6 +10,7 @@
 #define LLVM_MC_MCASMBACKEND_H
 
 #include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseSet.h"
 #include "llvm/ADT/Optional.h"
 #include "llvm/ADT/StringRef.h"
 #include "llvm/MC/MCDirectives.h"
@@ -81,12 +82,16 @@
   virtual const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const;
 
   /// Hook to check if a relocation is needed for some target specific reason.
-  virtual bool shouldForceRelocation(const MCAssembler &Asm,
-                                     const MCFixup &Fixup,
-                                     const MCValue &Target) {
+  virtual Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                               const MCFixup &Fixup,
+                                               const MCValue &Target) {
     return false;
   }
 
+  virtual void
+  handleUnknownFixups(DenseSet<const MCFixup *> &UnknownFixups,
+                      DenseMap<const MCFixup *, bool> &EvaluatedFixups) {}
+
   /// Hook to check if extra nop bytes must be inserted for alignment directive.
   /// For some targets this may be necessary in order to support linker
   /// relaxation. The number of bytes to insert are returned in Size.
Index: include/llvm/MC/MCAssembler.h
===================================================================
--- include/llvm/MC/MCAssembler.h
+++ include/llvm/MC/MCAssembler.h
@@ -173,6 +173,26 @@
                      const MCFragment *DF, MCValue &Target, uint64_t &Value,
                      bool &WasForced) const;
 
+  /// Evaluate a fixup to a relocatable expression and the value which should be
+  /// placed into the fixup. Same as evaluateFixup, but does not check if the
+  /// target needs to force the relocation.
+  ///
+  /// \param Layout The layout to use for evaluation.
+  /// \param Fixup The fixup to evaluate.
+  /// \param DF The fragment the fixup is inside.
+  /// \param Target [out] On return, the relocatable expression the fixup
+  /// evaluates to.
+  /// \param Value [out] On return, the value of the fixup as currently laid
+  /// out.
+  /// \param ErrorMsg [out] On return, stores the message related to any error
+  /// raised while evaluating this fixup.
+  /// \return Whether the fixup value was fully resolved. This is true if the
+  /// \p Value result is fixed, otherwise the value may change due to
+  /// relocation.
+  bool evaluateFixupPreTarget(const MCAsmLayout &Layout, const MCFixup &Fixup,
+                              const MCFragment *DF, MCValue &Target,
+                              uint64_t &Value, std::string &ErrorMsg) const;
+
   /// Check whether a fixup can be satisfied, or whether it needs to be relaxed
   /// (increased in size, in order to hold its value correctly).
   bool fixupNeedsRelaxation(const MCFixup &Fixup, const MCRelaxableFragment *DF,
@@ -206,8 +226,9 @@
   /// finishLayout - Finalize a layout, including fragment lowering.
   void finishLayout(MCAsmLayout &Layout);
 
-  std::tuple<MCValue, uint64_t, bool>
-  handleFixup(const MCAsmLayout &Layout, MCFragment &F, const MCFixup &Fixup);
+  void handleUnresolvedFixup(const MCAsmLayout &Layout, MCFragment &F,
+                             const MCFixup &Fixup, const MCValue &Target,
+                             uint64_t &FixedValue);
 
 public:
   std::vector<std::pair<StringRef, const MCSymbol *>> Symvers;
Index: lib/MC/MCAssembler.cpp
===================================================================
--- lib/MC/MCAssembler.cpp
+++ lib/MC/MCAssembler.cpp
@@ -199,6 +199,31 @@
                                 const MCFixup &Fixup, const MCFragment *DF,
                                 MCValue &Target, uint64_t &Value,
                                 bool &WasForced) const {
+  WasForced = false;
+  std::string ErrorMsg;
+  bool IsResolved =
+      evaluateFixupPreTarget(Layout, Fixup, DF, Target, Value, ErrorMsg);
+  if (!ErrorMsg.empty()) {
+    getContext().reportError(Fixup.getLoc(), ErrorMsg);
+    return true;
+  }
+  // Let the backend force a relocation if needed.
+  if (IsResolved) {
+    auto ShouldForce = getBackend().shouldForceRelocation(*this, Fixup, Target);
+    if (!ShouldForce.hasValue() || ShouldForce.getValue()) {
+      IsResolved = false;
+      WasForced = true;
+    }
+  }
+
+  return IsResolved;
+}
+
+bool MCAssembler::evaluateFixupPreTarget(const MCAsmLayout &Layout,
+                                         const MCFixup &Fixup,
+                                         const MCFragment *DF, MCValue &Target,
+                                         uint64_t &Value,
+                                         std::string &ErrorMsg) const {
   ++stats::evaluateFixup;
 
   // FIXME: This code has some duplication with recordRelocation. We should
@@ -208,17 +233,15 @@
   // On error claim to have completely evaluated the fixup, to prevent any
   // further processing from being done.
   const MCExpr *Expr = Fixup.getValue();
-  MCContext &Ctx = getContext();
   Value = 0;
-  WasForced = false;
+  ErrorMsg = "";
   if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
-    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
+    ErrorMsg = "expected relocatable expression";
     return true;
   }
   if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
     if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
-      Ctx.reportError(Fixup.getLoc(),
-                      "unsupported subtraction of qualified symbol");
+      ErrorMsg = "unsupported subtraction of qualified symbol";
       return true;
     }
   }
@@ -274,12 +297,6 @@
     Value -= Offset;
   }
 
-  // Let the backend force a relocation if needed.
-  if (IsResolved && getBackend().shouldForceRelocation(*this, Fixup, Target)) {
-    IsResolved = false;
-    WasForced = true;
-  }
-
   return IsResolved;
 }
 
@@ -717,41 +734,32 @@
   assert(OS.tell() - Start == Layout.getSectionAddressSize(Sec));
 }
 
-std::tuple<MCValue, uint64_t, bool>
-MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
-                         const MCFixup &Fixup) {
-  // Evaluate the fixup.
-  MCValue Target;
-  uint64_t FixedValue;
-  bool WasForced;
-  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue,
-                                  WasForced);
-  if (!IsResolved) {
-    // The fixup was unresolved, we need a relocation. Inform the object
-    // writer of the relocation, and give it an opportunity to adjust the
-    // fixup value if need be.
-    if (Target.getSymA() && Target.getSymB() &&
-        getBackend().requiresDiffExpressionRelocations()) {
-      // The fixup represents the difference between two symbols, which the
-      // backend has indicated must be resolved at link time. Split up the fixup
-      // into two relocations, one for the add, and one for the sub, and emit
-      // both of these. The constant will be associated with the add half of the
-      // expression.
-      MCFixup FixupAdd = MCFixup::createAddFor(Fixup);
-      MCValue TargetAdd =
-          MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
-      getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd,
-                                   FixedValue);
-      MCFixup FixupSub = MCFixup::createSubFor(Fixup);
-      MCValue TargetSub = MCValue::get(Target.getSymB());
-      getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub,
-                                   FixedValue);
-    } else {
-      getWriter().recordRelocation(*this, Layout, &F, Fixup, Target,
-                                   FixedValue);
-    }
+void MCAssembler::handleUnresolvedFixup(const MCAsmLayout &Layout,
+                                        MCFragment &F, const MCFixup &Fixup,
+                                        const MCValue &Target,
+                                        uint64_t &FixedValue) {
+  // The fixup was unresolved, we need a relocation. Inform the object
+  // writer of the relocation, and give it an opportunity to adjust the
+  // fixup value if need be.
+  if (Target.getSymA() && Target.getSymB() &&
+      getBackend().requiresDiffExpressionRelocations()) {
+    // The fixup represents the difference between two symbols, which the
+    // backend has indicated must be resolved at link time. Split up the fixup
+    // into two relocations, one for the add, and one for the sub, and emit
+    // both of these. The constant will be associated with the add half of the
+    // expression.
+    MCFixup FixupAdd = MCFixup::createAddFor(Fixup);
+    MCValue TargetAdd =
+        MCValue::get(Target.getSymA(), nullptr, Target.getConstant());
+    getWriter().recordRelocation(*this, Layout, &F, FixupAdd, TargetAdd,
+                                 FixedValue);
+    MCFixup FixupSub = MCFixup::createSubFor(Fixup);
+    MCValue TargetSub = MCValue::get(Target.getSymB());
+    getWriter().recordRelocation(*this, Layout, &F, FixupSub, TargetSub,
+                                 FixedValue);
+  } else {
+    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
   }
-  return std::make_tuple(Target, FixedValue, IsResolved);
 }
 
 void MCAssembler::layout(MCAsmLayout &Layout) {
@@ -842,13 +850,62 @@
         continue;
       } else
         llvm_unreachable("Unknown fragment with fixups!");
+
+      // Unknown fixups are fixups for which the target does not yet know if a
+      // relocation should be forced or not. These should be re-evaluated after
+      // all fixups in this section have been processed.
+      DenseSet<const MCFixup *> UnknownFixups;
+      // Fixups that have been completely evaluated, along with whether or not
+      // they can be resolved without a relocation.
+      DenseMap<const MCFixup *, bool> EvaluatedFixups;
+      // Store data for all fixups to avoid re-evaluating on the second pass.
+      // Should be iterated over in the same order the fixups were originally
+      // encountered - hence the use of a MapVector.
+      MapVector<const MCFixup *, std::tuple<MCValue, uint64_t, std::string>>
+          FixupsData;
+
       for (const MCFixup &Fixup : Fixups) {
+        MCValue Target;
         uint64_t FixedValue;
-        bool IsResolved;
+        std::string ErrorMsg;
+        bool IsResolved = evaluateFixupPreTarget(Layout, Fixup, &Frag, Target,
+                                                 FixedValue, ErrorMsg);
+        if (IsResolved && ErrorMsg.empty()) {
+          auto WasForced =
+              getBackend().shouldForceRelocation(*this, Fixup, Target);
+          if (!WasForced.hasValue()) {
+            // The target indicated that it doesn't know whether to force a
+            // relocation for this fixup.
+            UnknownFixups.insert(&Fixup);
+            FixupsData[&Fixup] = {Target, FixedValue, ErrorMsg};
+            continue;
+          }
+          if (WasForced.getValue())
+            IsResolved = false;
+        }
+        EvaluatedFixups[&Fixup] = IsResolved;
+        FixupsData[&Fixup] = {Target, FixedValue, ErrorMsg};
+      }
+
+      // Allow the target to clear the unknown fixups.
+      getBackend().handleUnknownFixups(UnknownFixups, EvaluatedFixups);
+      assert(UnknownFixups.empty() &&
             "Target failed to categorize all fixups");
+
+      // Now actions for all fixups are known, we can record relocations where
+      // necessary and apply fixups.
+      for (auto &FixupDataEntry : FixupsData) {
+        const MCFixup *Fixup = FixupDataEntry.first;
+        bool IsResolved = EvaluatedFixups[Fixup];
         MCValue Target;
-        std::tie(Target, FixedValue, IsResolved) =
-            handleFixup(Layout, Frag, Fixup);
-        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
+        uint64_t FixedValue;
+        std::string ErrorMsg;
+        std::tie(Target, FixedValue, ErrorMsg) = FixupDataEntry.second;
+        if (!ErrorMsg.empty())
+          getContext().reportError(Fixup->getLoc(), ErrorMsg);
+        if (!IsResolved)
+          handleUnresolvedFixup(Layout, Frag, *Fixup, Target, FixedValue);
+        getBackend().applyFixup(*this, *Fixup, Target, Contents, FixedValue,
                                 IsResolved, STI);
       }
     }
Index: lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
===================================================================
--- lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -91,8 +91,9 @@
 
   unsigned getFixupKindContainereSizeInBytes(unsigned Kind) const;
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override;
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override;
 };
 
 } // end anonymous namespace
@@ -442,9 +443,9 @@
   return true;
 }
 
-bool AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
-                                              const MCFixup &Fixup,
-                                              const MCValue &Target) {
+Optional<bool> AArch64AsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+                                                        const MCFixup &Fixup,
+                                                        const MCValue &Target) {
   // The ADRP instruction adds some multiple of 0x1000 to the current PC &
   // ~0xfff. This means that the required offset to reach a symbol can vary by
   // up to one step depending on where the ADRP is in memory. For example:
Index: lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
===================================================================
--- lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
+++ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.h
@@ -39,8 +39,9 @@
 
   const MCFixupKindInfo &getFixupKindInfo(MCFixupKind Kind) const override;
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override;
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override;
 
   unsigned adjustFixupValue(const MCAssembler &Asm, const MCFixup &Fixup,
                             const MCValue &Target, uint64_t Value,
Index: lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
===================================================================
--- lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
+++ lib/Target/ARM/MCTargetDesc/ARMAsmBackend.cpp
@@ -755,9 +755,9 @@
   }
 }
 
-bool ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
-                                          const MCFixup &Fixup,
-                                          const MCValue &Target) {
+Optional<bool> ARMAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+                                                    const MCFixup &Fixup,
+                                                    const MCValue &Target) {
   const MCSymbolRefExpr *A = Target.getSymA();
   const MCSymbol *Sym = A ? &A->getSymbol() : nullptr;
   const unsigned FixupKind = Fixup.getKind() ;
Index: lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
===================================================================
--- lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
+++ lib/Target/Hexagon/MCTargetDesc/HexagonAsmBackend.cpp
@@ -200,8 +200,9 @@
     return Infos[Kind - FirstTargetFixupKind];
   }
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override {
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override {
     MCFixupKind Kind = Fixup.getKind();
 
     switch((unsigned)Kind) {
Index: lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
===================================================================
--- lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
+++ lib/Target/Mips/MCTargetDesc/MipsAsmBackend.h
@@ -87,8 +87,9 @@
 
   bool writeNopData(raw_ostream &OS, uint64_t Count) const override;
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override;
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override;
 
   bool isMicroMips(const MCSymbol *Sym) const override;
 }; // class MipsAsmBackend
Index: lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
===================================================================
--- lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
+++ lib/Target/Mips/MCTargetDesc/MipsAsmBackend.cpp
@@ -533,9 +533,9 @@
   return true;
 }
 
-bool MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
-                                           const MCFixup &Fixup,
-                                           const MCValue &Target) {
+Optional<bool> MipsAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+                                                     const MCFixup &Fixup,
+                                                     const MCValue &Target) {
   const unsigned FixupKind = Fixup.getKind();
   switch (FixupKind) {
   default:
Index: lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
===================================================================
--- lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
+++ lib/Target/PowerPC/MCTargetDesc/PPCAsmBackend.cpp
@@ -133,8 +133,9 @@
     }
   }
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override {
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override {
     switch ((PPC::Fixups)Fixup.getKind()) {
     default:
       return false;
Index: lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
+++ lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.h
@@ -60,8 +60,13 @@
   std::unique_ptr<MCObjectTargetWriter>
   createObjectTargetWriter() const override;
 
-  bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                             const MCValue &Target) override;
+  Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                       const MCFixup &Fixup,
+                                       const MCValue &Target) override;
+
+  void handleUnknownFixups(
+      DenseSet<const MCFixup *> &UnknownFixups,
+      DenseMap<const MCFixup *, bool> &EvaluatedFixups) override;
 
   bool fixupNeedsRelaxation(const MCFixup &Fixup, uint64_t Value,
                             const MCRelaxableFragment *DF,
Index: lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
===================================================================
--- lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
+++ lib/Target/RISCV/MCTargetDesc/RISCVAsmBackend.cpp
@@ -25,10 +25,11 @@
 // If linker relaxation is enabled, or the relax option had previously been
 // enabled, always emit relocations even if the fixup can be resolved. This is
 // necessary for correctness as offsets may change during relaxation.
-bool RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
-                                            const MCFixup &Fixup,
-                                            const MCValue &Target) {
+Optional<bool> RISCVAsmBackend::shouldForceRelocation(const MCAssembler &Asm,
+                                                      const MCFixup &Fixup,
+                                                      const MCValue &Target) {
   bool ShouldForce = false;
+  bool Unknown = false;
 
   switch ((unsigned)Fixup.getKind()) {
   default:
@@ -36,7 +37,7 @@
   case RISCV::fixup_riscv_got_hi20:
     return true;
   case RISCV::fixup_riscv_pcrel_hi20:
-    ResolvedPCRelHiFixups.insert(&Fixup);
+    Unknown = true;
     break;
   case RISCV::fixup_riscv_pcrel_lo12_i:
   case RISCV::fixup_riscv_pcrel_lo12_s:
@@ -57,15 +58,76 @@
       ShouldForce = true;
       break;
     case RISCV::fixup_riscv_pcrel_hi20:
-      if (ResolvedPCRelHiFixups.count(T) == 0)
-        ShouldForce = true;
+      Unknown = true;
       break;
     }
     break;
   }
 
-  return ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] ||
-         ForceRelocs;
+  if (ShouldForce || STI.getFeatureBits()[RISCV::FeatureRelax] || ForceRelocs)
+    return true;
+  if (Unknown)
+    return None;
+  return false;
+}
+
+void RISCVAsmBackend::handleUnknownFixups(
+    DenseSet<const MCFixup *> &UnknownFixups,
+    DenseMap<const MCFixup *, bool> &EvaluatedFixups) {
+
+  DenseMap<const MCFixup *, bool> EvaluatedPCRelLoFixups;
+  for (const MCFixup *Fixup : UnknownFixups) {
+    switch ((unsigned)Fixup->getKind()) {
+    default:
+      llvm_unreachable("Currently only %pcrel_lo and %pcrel_hi should be "
+                       "handled as 'unknown' fixups.");
+    case RISCV::fixup_riscv_pcrel_hi20:
+      // Leave %pcrel_hi fixups to be evaluated later.
+      break;
+    case RISCV::fixup_riscv_pcrel_lo12_i:
+    case RISCV::fixup_riscv_pcrel_lo12_s:
+      const MCFixup *T =
+          cast<RISCVMCExpr>(Fixup->getValue())->getPCRelHiFixup();
+      if (UnknownFixups.count(T)) {
+        // The corresponding %pcrel_hi is also 'unknown', therefore there is no
+        // need to force a relocation and the fixup can be resolved. The
+        // %pcrel_hi will also be marked as resolved later.
+        EvaluatedFixups[Fixup] = true;
+        EvaluatedPCRelLoFixups[Fixup] = true;
+      } else {
+        // Otherwise the only other option is that the %pcrel_hi was unresolved.
+        EvaluatedFixups[Fixup] = false;
+        EvaluatedPCRelLoFixups[Fixup] = false;
+      }
+      UnknownFixups.erase(Fixup);
+      break;
+    }
+  }
+
+  // Find any unknown %pcrel_hi's through their corresponding %pcrel_lo.
+  for (const auto &EvaluatedFixup : EvaluatedPCRelLoFixups) {
+    const MCFixup *Fixup = EvaluatedFixup.first;
+    switch ((unsigned)Fixup->getKind()) {
+    default:
+      break;
+    case RISCV::fixup_riscv_pcrel_lo12_i:
+    case RISCV::fixup_riscv_pcrel_lo12_s:
+      const MCFixup *T =
+          cast<RISCVMCExpr>(Fixup->getValue())->getPCRelHiFixup();
+      if (UnknownFixups.count(T)) {
+        // We now know whether to resolve the %pcrel_hi based on this %pcrel_lo.
+        EvaluatedFixups[T] = EvaluatedFixup.second;
+        UnknownFixups.erase(T);
+      }
+      break;
+    }
+  }
+
+  // All remaining unknowns must be a %pcrel_hi without a corresponding
+  // %pcrel_lo, so they can be safely resolved.
+  for (const MCFixup *Fixup : UnknownFixups) {
+    EvaluatedFixups[Fixup] = true;
+    UnknownFixups.erase(Fixup);
+  }
 }
 
 bool RISCVAsmBackend::fixupNeedsRelaxationAdvanced(const MCFixup &Fixup,
Index: lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
===================================================================
--- lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
+++ lib/Target/Sparc/MCTargetDesc/SparcAsmBackend.cpp
@@ -223,8 +223,9 @@
       return InfosBE[Kind - FirstTargetFixupKind];
     }
 
-    bool shouldForceRelocation(const MCAssembler &Asm, const MCFixup &Fixup,
-                               const MCValue &Target) override {
+    Optional<bool> shouldForceRelocation(const MCAssembler &Asm,
+                                         const MCFixup &Fixup,
+                                         const MCValue &Target) override {
      switch ((Sparc::Fixups)Fixup.getKind()) {
      default:
        return false;
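Editor's note (not part of the patch): the contract introduced above is that shouldForceRelocation() may now answer "don't know yet" by returning None, and every fixup deferred that way must be moved from UnknownFixups into EvaluatedFixups by handleUnknownFixups() before the assembler's assert runs. RISC-V needs this because a %pcrel_lo fixup can only be settled once it is known whether its paired %pcrel_hi (found via getPCRelHiFixup()) ends up with a relocation. The sketch below is a minimal, standalone C++ model of that two-pass protocol; the Backend and Fixup types and the fixup-kind strings are invented stand-ins, not the LLVM API.

// Standalone illustration of the two-pass fixup protocol (hypothetical names).
#include <cassert>
#include <iostream>
#include <map>
#include <optional>
#include <set>
#include <string>
#include <vector>

struct Fixup {
  std::string Kind; // stands in for an MC fixup kind
};

struct Backend {
  // Tri-state decision: true = force a relocation, false = resolve now,
  // nullopt = cannot decide until every fixup in the section has been seen.
  std::optional<bool> shouldForceRelocation(const Fixup &F) const {
    if (F.Kind == "pcrel_hi")
      return std::nullopt; // deferred, like fixup_riscv_pcrel_hi20
    if (F.Kind == "got_hi")
      return true;         // always needs a relocation
    return false;          // ordinary fixup, resolve immediately
  }

  // Must empty Unknown; here every deferred fixup is simply marked resolved.
  void handleUnknownFixups(std::set<const Fixup *> &Unknown,
                           std::map<const Fixup *, bool> &Evaluated) const {
    for (const Fixup *F : Unknown)
      Evaluated[F] = true;
    Unknown.clear();
  }
};

int main() {
  Backend B;
  std::vector<Fixup> Fixups = {{"pcrel_hi"}, {"got_hi"}, {"branch"}};

  std::set<const Fixup *> Unknown;
  std::map<const Fixup *, bool> Evaluated; // fixup -> IsResolved

  // Pass 1: evaluate everything, deferring fixups the backend cannot judge yet.
  for (const Fixup &F : Fixups) {
    std::optional<bool> Force = B.shouldForceRelocation(F);
    if (!Force) {
      Unknown.insert(&F);
      continue;
    }
    Evaluated[&F] = !*Force; // a forced relocation means "not resolved"
  }

  // Let the backend settle the deferred decisions, then insist that it did.
  B.handleUnknownFixups(Unknown, Evaluated);
  assert(Unknown.empty() && "backend failed to categorize all fixups");

  // Pass 2: apply fixups or record relocations based on the final verdicts.
  for (const Fixup &F : Fixups)
    std::cout << F.Kind
              << (Evaluated[&F] ? ": resolved\n" : ": needs relocation\n");
  return 0;
}

In the real patch the same shape appears in MCAssembler::layout(): the first loop fills UnknownFixups/EvaluatedFixups/FixupsData, handleUnknownFixups() is the single point where a target may reclassify deferred fixups, and the second loop records relocations and applies the fixups.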