diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -970,11 +970,19 @@
   bool isMOVZMovAlias() const {
     if (!isImm()) return false;
 
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-    uint64_t Value = CE->getValue();
+    const MCExpr *E = getImm();
+    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
+      uint64_t Value = CE->getValue();
 
-    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
+      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
+    }
+    // The operand is the result of subtraction between two labels.
+    if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(E)) {
+      return (BE->getOpcode() == MCBinaryExpr::Opcode::Sub &&
+              BE->getLHS()->getKind() == MCExpr::ExprKind::SymbolRef &&
+              BE->getRHS()->getKind() == MCExpr::ExprKind::SymbolRef);
+    }
+    return false;
   }
 
   template<int RegWidth, int Shift>
@@ -1774,9 +1782,14 @@
   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
 
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    uint64_t Value = CE->getValue();
-    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (CE) {
+      uint64_t Value = CE->getValue();
+      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
+    } else {
+      addExpr(Inst, getImm());
+      Inst.addOperand(MCOperand::createImm(0));
+    }
   }
 
   template<int Shift>
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -239,8 +239,27 @@
       Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
     return Value >> 4;
   case AArch64::fixup_aarch64_movw: {
+    if (!IsResolved) {
+      // FIXME: Figure out when this can actually happen, and verify our
+      // behavior.
+      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
+                                      "implemented");
+      return Value;
+    }
     AArch64MCExpr::VariantKind RefKind =
         static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+    if (!RefKind) {
+      if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Fixup.getValue())) {
+        if (BE->getOpcode() == MCBinaryExpr::Opcode::Sub &&
+            BE->getLHS()->getKind() == MCExpr::ExprKind::SymbolRef &&
+            BE->getRHS()->getKind() == MCExpr::ExprKind::SymbolRef) {
+          if (SignedValue < 0)
+            SignedValue = ~SignedValue;
+          Value = static_cast<uint64_t>(SignedValue);
+          return Value;
+        }
+      }
+    }
     if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
         AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
       // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
@@ -251,13 +270,6 @@
       return Value;
     }
 
-    if (!IsResolved) {
-      // FIXME: Figure out when this can actually happen, and verify our
-      // behavior.
-      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
-                                      "implemented");
-      return Value;
-    }
 
     if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
       switch (AArch64MCExpr::getAddressFrag(RefKind)) {
@@ -441,6 +453,20 @@
   // handle this more cleanly. This may affect the output of -show-mc-encoding.
   AArch64MCExpr::VariantKind RefKind =
       static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+  if (!RefKind) {
+    if (Fixup.getTargetKind() == AArch64::fixup_aarch64_movw) {
+      if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(Fixup.getValue())) {
+        if (BE->getOpcode() == MCBinaryExpr::Opcode::Sub &&
+            BE->getLHS()->getKind() == MCExpr::ExprKind::SymbolRef &&
+            BE->getRHS()->getKind() == MCExpr::ExprKind::SymbolRef) {
+          if (SignedValue < 0)
+            Data[Offset + 3] &= ~(1 << 6);
+          else
+            Data[Offset + 3] |= (1 << 6);
+        }
+      }
+    }
+  }
   if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
     // If the immediate is negative, generate MOVN else MOVZ.
     // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -569,21 +569,30 @@
 
   if (UImm16MO.isImm())
     return EncodedValue;
 
-  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
-  switch (A64E->getKind()) {
-  case AArch64MCExpr::VK_DTPREL_G2:
-  case AArch64MCExpr::VK_DTPREL_G1:
-  case AArch64MCExpr::VK_DTPREL_G0:
-  case AArch64MCExpr::VK_GOTTPREL_G1:
-  case AArch64MCExpr::VK_TPREL_G2:
-  case AArch64MCExpr::VK_TPREL_G1:
-  case AArch64MCExpr::VK_TPREL_G0:
-    return EncodedValue & ~(1u << 30);
-  default:
-    // Nothing to do for an unsigned fixup.
-    return EncodedValue;
+  const MCExpr *E = UImm16MO.getExpr();
+  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
+    switch (A64E->getKind()) {
+    case AArch64MCExpr::VK_DTPREL_G2:
+    case AArch64MCExpr::VK_DTPREL_G1:
+    case AArch64MCExpr::VK_DTPREL_G0:
+    case AArch64MCExpr::VK_GOTTPREL_G1:
+    case AArch64MCExpr::VK_TPREL_G2:
+    case AArch64MCExpr::VK_TPREL_G1:
+    case AArch64MCExpr::VK_TPREL_G0:
+      return EncodedValue & ~(1u << 30);
+    default:
+      // Nothing to do for an unsigned fixup.
+      return EncodedValue;
+    }
   }
+  if (const MCBinaryExpr *BE = dyn_cast<MCBinaryExpr>(E)) {
+    if (BE->getOpcode() == MCBinaryExpr::Opcode::Sub &&
+        BE->getLHS()->getKind() == MCExpr::ExprKind::SymbolRef &&
+        BE->getRHS()->getKind() == MCExpr::ExprKind::SymbolRef) {
+      return EncodedValue;
+    }
+  }
 
   return EncodedValue & ~(1u << 30);
 }