Index: lib/Target/ARM/ARMInstrFormats.td =================================================================== --- lib/Target/ARM/ARMInstrFormats.td +++ lib/Target/ARM/ARMInstrFormats.td @@ -246,6 +246,15 @@ let ParserMatchClass = shr_imm64_asm_operand; } + +// ARM Assembler operand for ldr Rd, =expression which generates an offset +// to a constant pool entry or a MOV depending on the value of expression +def const_pool_asm_operand : AsmOperandClass { let Name = "ConstPoolAsmImm"; } +def const_pool_asm_imm : Operand<i32> { + let ParserMatchClass = const_pool_asm_operand; +} + + //===----------------------------------------------------------------------===// // ARM Assembler alias templates. // Index: lib/Target/ARM/ARMInstrInfo.td =================================================================== --- lib/Target/ARM/ARMInstrInfo.td +++ lib/Target/ARM/ARMInstrInfo.td @@ -2792,6 +2792,12 @@ : ARMAsmPseudo<"ldrbt${q} $Rt, $addr", (ins addr_offset_none:$addr, pred:$q), (outs GPR:$Rt)>; +// Pseudo instruction ldr Rt, =immediate +def LDRConstPool + : ARMAsmPseudo<"ldr${q} $Rt, $immediate", + (ins const_pool_asm_imm:$immediate, pred:$q), + (outs GPR:$Rt)>; + // Store // Stores with truncate Index: lib/Target/ARM/ARMInstrThumb.td =================================================================== --- lib/Target/ARM/ARMInstrThumb.td +++ lib/Target/ARM/ARMInstrThumb.td @@ -1543,3 +1543,8 @@ (tLSRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>; def : tInstAlias<"asr${s}${p} $Rdm, $imm", (tASRri tGPR:$Rdm, cc_out:$s, tGPR:$Rdm, imm_sr:$imm, pred:$p)>; + +// Pseudo instruction ldr Rt, =immediate +def tLDRConstPool + : tAsmPseudo<"ldr${p} $Rt, $immediate", + (ins tGPR:$Rt, const_pool_asm_imm:$immediate, pred:$p)>; Index: lib/Target/ARM/ARMInstrThumb2.td =================================================================== --- lib/Target/ARM/ARMInstrThumb2.td +++ lib/Target/ARM/ARMInstrThumb2.td @@ -4802,6 +4802,11 @@ def : t2InstAlias<"add${p} $Rd, pc, $imm", (t2ADR rGPR:$Rd, 
imm0_4095:$imm, pred:$p)>; +// Pseudo instruction ldr Rt, =immediate +def t2LDRConstPool + : t2AsmPseudo<"ldr${p} $Rt, $immediate", + (ins GPRnopc:$Rt, const_pool_asm_imm:$immediate, pred:$p)>; + // PLD/PLDW/PLI with alternate literal form. def : t2InstAlias<"pld${p} $addr", (t2PLDpci t2ldr_pcrel_imm12:$addr, pred:$p)>; Index: lib/Target/ARM/AsmParser/ARMAsmParser.cpp =================================================================== --- lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -427,8 +427,9 @@ k_ShifterImmediate, k_RotateImmediate, k_ModifiedImmediate, + k_ConstantPoolImmediate, k_BitfieldDescriptor, - k_Token + k_Token, } Kind; SMLoc StartLoc, EndLoc, AlignmentLoc; @@ -621,6 +622,11 @@ return Imm.Val; } + const MCExpr *getConstantPoolImm() const { + assert(isConstantPoolImm() && "Invalid access!"); + return Imm.Val; + } + unsigned getVectorIndex() const { assert(Kind == k_VectorIndex && "Invalid access!"); return VectorIndex.Val; @@ -658,7 +664,9 @@ bool isCCOut() const { return Kind == k_CCOut; } bool isITMask() const { return Kind == k_ITCondMask; } bool isITCondCode() const { return Kind == k_CondCode; } - bool isImm() const override { return Kind == k_Immediate; } + bool isImm() const override { + return Kind == k_Immediate; + } // checks whether this operand is an unsigned offset which fits is a field // of specified width and scaled by a specific number of bits template<unsigned width, unsigned scale> @@ -1046,6 +1054,7 @@ return ARM_AM::getSOImmVal(Value) == -1 && ARM_AM::getSOImmVal(-Value) != -1; } + bool isConstantPoolImm() const { return Kind == k_ConstantPoolImmediate; } bool isBitfield() const { return Kind == k_BitfieldDescriptor; } bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } bool isPostIdxReg() const { @@ -1343,6 +1352,7 @@ // If we have an immediate that's not a constant, treat it as a label // reference needing a fixup. If it is a constant, it's something else // and we reject it. 
+ if (isImm() && !isa<MCConstantExpr>(getImm())) return true; @@ -1353,6 +1363,11 @@ int64_t Val = Memory.OffsetImm->getValue(); return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); } + bool isConstPoolAsmImm() const { + // Delay processing of Constant Pool Immediate, this will turn into + // a constant. Match no other operand + return (isConstantPoolImm()); + } bool isPostIdxImm8() const { if (!isImm()) return false; const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); @@ -1965,6 +1980,7 @@ } const MCSymbolRefExpr *SR = dyn_cast<MCSymbolRefExpr>(Imm.Val); + assert(SR && "Unknown value type!"); Inst.addOperand(MCOperand::createExpr(SR)); return; @@ -2260,6 +2276,14 @@ Inst.addOperand(MCOperand::createImm(Val)); } + void addConstPoolAsmImmOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + // This is container for the immediate that we will create the constant + // pool from + addExpr(Inst, getConstantPoolImm()); + return; + } + void addMemTBBOperands(MCInst &Inst, unsigned N) const { assert(N == 2 && "Invalid number of operands!"); Inst.addOperand(MCOperand::createReg(Memory.BaseRegNum)); @@ -2640,6 +2664,15 @@ } static std::unique_ptr<ARMOperand> + CreateConstantPoolImm(const MCExpr *Val, SMLoc S, SMLoc E) { + auto Op = make_unique<ARMOperand>(k_ConstantPoolImmediate); + Op->Imm.Val = Val; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + + static std::unique_ptr<ARMOperand> CreateBitfield(unsigned LSB, unsigned Width, SMLoc S, SMLoc E) { auto Op = make_unique<ARMOperand>(k_BitfieldDescriptor); Op->Bitfield.LSB = LSB; @@ -2896,6 +2929,9 @@ OS << ""; break; + case k_ConstantPoolImmediate: + OS << "<constant_pool_imm #" << *getConstantPoolImm(); + break; @@ -5217,10 +5253,7 @@ if (getParser().parseExpression(SubExprVal)) return true; E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); - - const MCExpr *CPLoc = - getTargetStreamer().addConstantPoolEntry(SubExprVal, S); - Operands.push_back(ARMOperand::CreateImm(CPLoc, S, E)); + Operands.push_back(ARMOperand::CreateConstantPoolImm(SubExprVal, S, E)); return false; } } @@ -6835,6 
+6868,34 @@ case ARM::t2LDRSHpcrel: Inst.setOpcode(ARM::t2LDRSHpci); return true; + case ARM::LDRConstPool: + case ARM::tLDRConstPool: + case ARM::t2LDRConstPool: { + // Handle the pseudo instruction for ldr rn,=<expression> + // For now we always create the constant pool entry and load from it + // FIXME: Use a MOV or MVN when the immediate will fit + MCInst TmpInst; + if (Inst.getOpcode() == ARM::LDRConstPool) + TmpInst.setOpcode(ARM::LDRi12); + else if (Inst.getOpcode() == ARM::tLDRConstPool) + TmpInst.setOpcode(ARM::tLDRpci); + else if (Inst.getOpcode() == ARM::t2LDRConstPool) + TmpInst.setOpcode(ARM::t2LDRpci); + const ARMOperand &PoolOperand = + static_cast<ARMOperand &>(*Operands[3]); + const MCExpr *SubExprVal = PoolOperand.getConstantPoolImm(); + const MCExpr *CPLoc = + getTargetStreamer().addConstantPoolEntry(SubExprVal, + PoolOperand.getStartLoc()); + TmpInst.addOperand(Inst.getOperand(0)); // Rt + TmpInst.addOperand(MCOperand::createExpr(CPLoc)); // offset to constpool + if (TmpInst.getOpcode() == ARM::LDRi12) + TmpInst.addOperand(MCOperand::createImm(0)); // unused offset + TmpInst.addOperand(Inst.getOperand(2)); // CondCode + TmpInst.addOperand(Inst.getOperand(3)); // CondCode + Inst = TmpInst; + return true; + } // Handle NEON VST complex aliases. case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_16: