diff --git a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
--- a/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
+++ b/llvm/lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp
@@ -970,11 +970,13 @@
   bool isMOVZMovAlias() const {
     if (!isImm()) return false;
 
-    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
-    if (!CE) return false;
-    uint64_t Value = CE->getValue();
+    const MCExpr *E = getImm();
+    if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(E)) {
+      uint64_t Value = CE->getValue();
 
-    return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
+      return AArch64_AM::isMOVZMovAlias(Value, Shift, RegWidth);
+    }
+    return E;
   }
 
   template <int RegWidth, int Shift>
@@ -1774,9 +1776,14 @@
 
   void addMOVZMovAliasOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
-    const MCConstantExpr *CE = cast<MCConstantExpr>(getImm());
-    uint64_t Value = CE->getValue();
-    Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (CE) {
+      uint64_t Value = CE->getValue();
+      Inst.addOperand(MCOperand::createImm((Value >> Shift) & 0xffff));
+    } else {
+      addExpr(Inst, getImm());
+      Inst.addOperand(MCOperand::createImm(0));
+    }
   }
 
   template <int Shift>
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64AsmBackend.cpp
@@ -239,8 +239,25 @@
       Ctx.reportError(Fixup.getLoc(), "fixup must be 16-byte aligned");
     return Value >> 4;
   case AArch64::fixup_aarch64_movw: {
+    if (!IsResolved) {
+      // FIXME: Figure out when this can actually happen, and verify our
+      // behavior.
+      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
+                                      "implemented");
+      return Value;
+    }
     AArch64MCExpr::VariantKind RefKind =
         static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+    if (!RefKind) {
+      if (SignedValue > 0xFFFF || SignedValue < -0xFFFF)
+        Ctx.reportError(Fixup.getLoc(), "fixup value out of range");
+
+      // Invert the negative immediate because it will feed into a MOVN.
+      if (SignedValue < 0)
+        SignedValue = ~SignedValue;
+      Value = static_cast<uint64_t>(SignedValue);
+      return Value;
+    }
     if (AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_ABS &&
         AArch64MCExpr::getSymbolLoc(RefKind) != AArch64MCExpr::VK_SABS) {
       // VK_GOTTPREL, VK_TPREL, VK_DTPREL are movw fixups, but they can't
@@ -251,13 +268,6 @@
       return Value;
     }
 
-    if (!IsResolved) {
-      // FIXME: Figure out when this can actually happen, and verify our
-      // behavior.
-      Ctx.reportError(Fixup.getLoc(), "unresolved movw fixup not yet "
-                                      "implemented");
-      return Value;
-    }
 
     if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
       switch (AArch64MCExpr::getAddressFrag(RefKind)) {
@@ -441,6 +451,14 @@
   // handle this more cleanly. This may affect the output of -show-mc-encoding.
   AArch64MCExpr::VariantKind RefKind =
       static_cast<AArch64MCExpr::VariantKind>(Target.getRefKind());
+  if (!RefKind) {
+    if (Fixup.getTargetKind() == AArch64::fixup_aarch64_movw) {
+      if (SignedValue < 0)
+        Data[Offset + 3] &= ~(1 << 6);
+      else
+        Data[Offset + 3] |= (1 << 6);
+    }
+  }
   if (AArch64MCExpr::getSymbolLoc(RefKind) == AArch64MCExpr::VK_SABS) {
     // If the immediate is negative, generate MOVN else MOVZ.
     // (Bit 30 = 0) ==> MOVN, (Bit 30 = 1) ==> MOVZ.
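Note on how the two AArch64AsmBackend changes cooperate for a plain (RefKind == 0) movw fixup: adjustFixupValue bitwise-inverts a negative value so it can feed a MOVN, and applyFixup then flips bit 30 of the instruction word to select MOVN instead of MOVZ. The following standalone sketch (function name and driver are illustrative, not LLVM's API) shows just that bit manipulation:

```cpp
#include <cassert>
#include <cstdint>

// In the 32-bit "move wide immediate" encoding, bits 30:29 are the opc
// field: 00 selects MOVN and 10 selects MOVZ. Instructions are stored
// little-endian, so Data[Offset + 3] holds bits 31:24, and bit 6 of that
// byte is bit 30 of the instruction -- the same byte/bit the patch touches.
void selectMovzOrMovn(uint8_t *Data, unsigned Offset, int64_t SignedValue) {
  if (SignedValue < 0)
    Data[Offset + 3] &= ~(1u << 6); // clear bit 30 -> MOVN
  else
    Data[Offset + 3] |= (1u << 6);  // set bit 30 -> MOVZ
}

int main() {
  // movz x0, #0 encodes as 0xd2800000; flipping bit 30 turns the opcode
  // into movn x0, #0 (0x92800000).
  uint8_t Inst[4] = {0x00, 0x00, 0x80, 0xd2};
  selectMovzOrMovn(Inst, 0, -4);
  assert(Inst[3] == 0x92);
}
```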
diff --git a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
--- a/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
+++ b/llvm/lib/Target/AArch64/MCTargetDesc/AArch64MCCodeEmitter.cpp
@@ -569,23 +569,24 @@
   // Nothing to do if there's no fixup.
   if (UImm16MO.isImm())
     return EncodedValue;
 
-  const AArch64MCExpr *A64E = cast<AArch64MCExpr>(UImm16MO.getExpr());
-  switch (A64E->getKind()) {
-  case AArch64MCExpr::VK_DTPREL_G2:
-  case AArch64MCExpr::VK_DTPREL_G1:
-  case AArch64MCExpr::VK_DTPREL_G0:
-  case AArch64MCExpr::VK_GOTTPREL_G1:
-  case AArch64MCExpr::VK_TPREL_G2:
-  case AArch64MCExpr::VK_TPREL_G1:
-  case AArch64MCExpr::VK_TPREL_G0:
-    return EncodedValue & ~(1u << 30);
-  default:
-    // Nothing to do for an unsigned fixup.
-    return EncodedValue;
+  const MCExpr *E = UImm16MO.getExpr();
+  if (const AArch64MCExpr *A64E = dyn_cast<AArch64MCExpr>(E)) {
+    switch (A64E->getKind()) {
+    case AArch64MCExpr::VK_DTPREL_G2:
+    case AArch64MCExpr::VK_DTPREL_G1:
+    case AArch64MCExpr::VK_DTPREL_G0:
+    case AArch64MCExpr::VK_GOTTPREL_G1:
+    case AArch64MCExpr::VK_TPREL_G2:
+    case AArch64MCExpr::VK_TPREL_G1:
+    case AArch64MCExpr::VK_TPREL_G0:
+      return EncodedValue & ~(1u << 30);
+    default:
+      // Nothing to do for an unsigned fixup.
+      return EncodedValue;
+    }
   }
-
-  return EncodedValue & ~(1u << 30);
+  return EncodedValue;
 }
 
 void AArch64MCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
diff --git a/llvm/test/MC/AArch64/basic-a64-diagnostics.s b/llvm/test/MC/AArch64/basic-a64-diagnostics.s
--- a/llvm/test/MC/AArch64/basic-a64-diagnostics.s
+++ b/llvm/test/MC/AArch64/basic-a64-diagnostics.s
@@ -167,9 +167,9 @@
 // MOV alias should not accept any fiddling
         mov x2, xsp, #123
         mov wsp, w27, #0xfff, lsl #12
-// CHECK-ERROR: error: expected compatible register or logical immediate
+// CHECK-ERROR: error: invalid operand for instruction
 // CHECK-ERROR-NEXT:         mov x2, xsp, #123
-// CHECK-ERROR-NEXT:                      ^
+// CHECK-ERROR-NEXT:                 ^
 // CHECK-ERROR-NEXT: error: invalid operand for instruction
 // CHECK-ERROR-NEXT:         mov wsp, w27, #0xfff, lsl #12
 // CHECK-ERROR-NEXT:         ^
diff --git a/llvm/test/MC/AArch64/mov-expression-as-immediate.s b/llvm/test/MC/AArch64/mov-expression-as-immediate.s
new file mode 100644
--- /dev/null
+++ b/llvm/test/MC/AArch64/mov-expression-as-immediate.s
@@ -0,0 +1,9 @@
+// RUN: llvm-mc -triple aarch64-none-linux-gnu %s -filetype=obj -o %t
+// RUN: llvm-objdump -d %t | FileCheck %s
+0:
+.skip 4
+1:
+mov x0, 1b - 0b
+// CHECK: mov x0, #4
+mov x0, 0b - 1b
+// CHECK: mov x0, #-4
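To make the arithmetic behind the new test's CHECK lines concrete, here is a minimal sketch of the RefKind == 0 range check and inversion added to adjustFixupValue. The helper name and the bool out-parameter (standing in for Ctx.reportError) are illustrative, not LLVM's API:

```cpp
#include <cassert>
#include <cstdint>

// A plain expression must fit in a single 16-bit move-wide immediate,
// and a negative value is bitwise-inverted because the instruction will
// be emitted as MOVN, whose result is the bitwise NOT of its operand.
uint64_t adjustMovwFixup(int64_t SignedValue, bool &InRange) {
  InRange = SignedValue <= 0xFFFF && SignedValue >= -0xFFFF;
  if (SignedValue < 0)
    SignedValue = ~SignedValue;
  return static_cast<uint64_t>(SignedValue);
}

int main() {
  bool InRange;
  // mov x0, 1b - 0b resolves to 4 -> movz x0, #4, printed as mov x0, #4.
  assert(adjustMovwFixup(4, InRange) == 4 && InRange);
  // mov x0, 0b - 1b resolves to -4 -> imm16 = ~(-4) = 3; applyFixup
  // clears bit 30 to make it movn x0, #3, printed as mov x0, #-4.
  assert(adjustMovwFixup(-4, InRange) == 3 && InRange);
  // A value outside [-0xFFFF, 0xFFFF] triggers "fixup value out of range".
  adjustMovwFixup(0x12345, InRange);
  assert(!InRange);
}
```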