diff --git a/llvm/lib/Target/ARM/ARMInstrFormats.td b/llvm/lib/Target/ARM/ARMInstrFormats.td
--- a/llvm/lib/Target/ARM/ARMInstrFormats.td
+++ b/llvm/lib/Target/ARM/ARMInstrFormats.td
@@ -465,6 +465,8 @@
   : AsmPseudoInst, Requires<[HasVFP2]>;
 class NEONAsmPseudo
   : AsmPseudoInst, Requires<[HasNEON]>;
+class MVEAsmPseudo
+  : AsmPseudoInst, Requires<[HasMVEInt]>;
 
 // Pseudo instructions for the code generator.
 class PseudoInst<dag oops, dag iops, InstrItinClass itin, list<dag> pattern>
diff --git a/llvm/lib/Target/ARM/ARMInstrInfo.td b/llvm/lib/Target/ARM/ARMInstrInfo.td
--- a/llvm/lib/Target/ARM/ARMInstrInfo.td
+++ b/llvm/lib/Target/ARM/ARMInstrInfo.td
@@ -461,6 +461,30 @@
   let ParserMatchClass = RotImmAsmOperand;
 }
 
+// Vector indexing
+class MVEVectorIndexOperand<string size> : AsmOperandClass {
+  let Name = "MVEVectorIndex"#size;
+  let RenderMethod = "addMVEVectorIndexOperands";
+  let PredicateMethod = "isMVEVectorIndex<"#size#">";
+}
+
+class MVEVectorIndex<string opval> : Operand<i32> {
+  let PrintMethod = "printVectorIndex";
+  let EncoderMethod = "getMVEVectorIndexOpValue<"#opval#">";
+  let DecoderMethod = "DecodeMVEVectorIndexOperand<"#opval#">";
+  let MIOperandInfo = (ops i32imm);
+}
+
+def MVEVectorIndex8 : MVEVectorIndex<"4"> {
+  let ParserMatchClass = MVEVectorIndexOperand<"16">;
+}
+def MVEVectorIndex16 : MVEVectorIndex<"2"> {
+  let ParserMatchClass = MVEVectorIndexOperand<"8">;
+}
+def MVEVectorIndex32 : MVEVectorIndex<"1"> {
+  let ParserMatchClass = MVEVectorIndexOperand<"4">;
+}
+
 // shift_imm: An integer that encodes a shift amount and the type of shift // (asr or lsl). 
The 6-bit immediate encodes as: // {5} 0 ==> lsl diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td --- a/llvm/lib/Target/ARM/ARMInstrMVE.td +++ b/llvm/lib/Target/ARM/ARMInstrMVE.td @@ -10,6 +10,44 @@ // //===----------------------------------------------------------------------===// +class ExpandImmAsmOp : AsmOperandClass { + let Name = !strconcat("ExpandImm", shift); + let PredicateMethod = !strconcat("isExpImm<", shift, ">"); + let RenderMethod = "addImmOperands"; +} +class InvertedExpandImmAsmOp : AsmOperandClass { + let Name = !strconcat("InvertedExpandImm", shift, "_", size); + let PredicateMethod = !strconcat("isInvertedExpImm<", shift, ",", size, ">"); + let RenderMethod = "addImmOperands"; +} + +class ExpandImm : Operand { + let ParserMatchClass = ExpandImmAsmOp; + let EncoderMethod = !strconcat("getExpandedImmOpValue<",shift,",false>"); + let DecoderMethod = !strconcat("DecodeExpandedImmOperand<",shift,">"); + let PrintMethod = "printExpandedImmOperand"; +} +class InvertedExpandImm : Operand { + let ParserMatchClass = InvertedExpandImmAsmOp; + let EncoderMethod = !strconcat("getExpandedImmOpValue<",shift,",true>"); + let PrintMethod = "printExpandedImmOperand"; + // No decoder method needed, because this operand type is only used + // by aliases (VAND and VORN) +} + +def expzero00 : ExpandImm<"0">; +def expzero08 : ExpandImm<"8">; +def expzero16 : ExpandImm<"16">; +def expzero24 : ExpandImm<"24">; + +def expzero00inv16 : InvertedExpandImm<"0", "16">; +def expzero08inv16 : InvertedExpandImm<"8", "16">; + +def expzero00inv32 : InvertedExpandImm<"0", "32">; +def expzero08inv32 : InvertedExpandImm<"8", "32">; +def expzero16inv32 : InvertedExpandImm<"16", "32">; +def expzero24inv32 : InvertedExpandImm<"24", "32">; + // VPT condition mask def vpt_mask : Operand { let PrintMethod = "printVPTMask"; @@ -113,6 +151,16 @@ let DecoderNamespace = "MVE"; } +class VMOV pattern> + : Thumb2I, + Requires<[HasV8_1MMainline, HasMVEInt]> { + 
let D = MVEDomain; + let DecoderNamespace = "MVE"; +} + class t2MVEShift pattern=[]> : MVE_MI_with_pred { @@ -1280,6 +1328,261 @@ } // end of mve_shift instructions +// start of mve_bit instructions + +class MVE_bit_arith pattern=[]> + : MVE_p { + bits<4> Qd; + bits<4> Qm; + + let Inst{22} = Qd{3}; + let Inst{15-13} = Qd{2-0}; + let Inst{5} = Qm{3}; + let Inst{3-1} = Qm{2-0}; +} + +def VBIC : MVE_bit_arith<(outs MQPR:$Qd), + (ins MQPR:$Qn, MQPR:$Qm), "vbic", "", + "$Qd, $Qn, $Qm", ""> { + bits<4> Qn; + + let Inst{28} = 0b0; + let Inst{25-23} = 0b110; + let Inst{21-20} = 0b01; + let Inst{19-17} = Qn{2-0}; + let Inst{16} = 0b0; + let Inst{12-8} = 0b00001; + let Inst{7} = Qn{3}; + let Inst{6} = 0b1; + let Inst{4} = 0b1; + let Inst{0} = 0b0; +} + +class t2VREV size, bits<2> bit_8_7> + : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm), iname, + suffix, "$Qd, $Qm", ""> { + + let Inst{28} = 0b1; + let Inst{25-23} = 0b111; + let Inst{21-20} = 0b11; + let Inst{19-18} = size{1-0}; + let Inst{17-16} = 0b00; + let Inst{12-9} = 0b0000; + let Inst{8-7} = bit_8_7{1-0}; + let Inst{6} = 0b1; + let Inst{4} = 0b0; + let Inst{0} = 0b0; +} + +def VREV64_8 : t2VREV<"vrev64", "8", 0b00, 0b00>; +def VREV64_16 : t2VREV<"vrev64", "16", 0b01, 0b00>; +def VREV64_32 : t2VREV<"vrev64", "32", 0b10, 0b00>; + +def VREV32_8 : t2VREV<"vrev32", "8", 0b00, 0b01>; +def VREV32_16 : t2VREV<"vrev32", "16", 0b01, 0b01>; + +def VREV16_8 : t2VREV<"vrev16", "8", 0b00, 0b10>; + +def VMVN : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qm), + "vmvn", "", "$Qd, $Qm", ""> { + let Inst{28} = 0b1; + let Inst{25-23} = 0b111; + let Inst{21-16} = 0b110000; + let Inst{12-6} = 0b0010111; + let Inst{4} = 0b0; + let Inst{0} = 0b0; +} + +class MVE_bit_ops bit_21_20, bit bit_28> + : MVE_bit_arith<(outs MQPR:$Qd), (ins MQPR:$Qn, MQPR:$Qm), + iname, "", "$Qd, $Qn, $Qm", ""> { + bits<4> Qn; + + let Inst{28} = bit_28; + let Inst{25-23} = 0b110; + let Inst{21-20} = bit_21_20{1-0}; + let Inst{19-17} = Qn{2-0}; + let Inst{16} = 0b0; + 
let Inst{12-8} = 0b00001; + let Inst{7} = Qn{3}; + let Inst{6} = 0b1; + let Inst{4} = 0b1; + let Inst{0} = 0b0; +} + +def VEOR : MVE_bit_ops<"veor", 0b00, 0b1>; +def VORN : MVE_bit_ops<"vorn", 0b11, 0b0>; +def MVE_VORR : MVE_bit_ops<"vorr", 0b10, 0b0>; +def MVE_VAND : MVE_bit_ops<"vand", 0b00, 0b0>; + +// add ignored suffixes as aliases + +foreach s=["s8", "s16", "s32", "u8", "u16", "u32", "i8", "i16", "i32", "f16", "f32"] in { + def : MVEInstAlias; + def : MVEInstAlias; + def : MVEInstAlias; + def : MVEInstAlias; + def : MVEInstAlias; +} + +class MVE_bit_cmode cmode, dag inOps> + : MVE_p<(outs MQPR:$Qd), inOps, NoItinerary, + iname, suffix, "$Qd, $imm", vpred_n, "$Qd = $Qd_src"> { + bits<8> imm; + bits<4> Qd; + + let Inst{28} = imm{7}; + let Inst{27-23} = 0b11111; + let Inst{22} = Qd{3}; + let Inst{21-19} = 0b000; + let Inst{18-16} = imm{6-4}; + let Inst{15-13} = Qd{2-0}; + let Inst{12} = 0b0; + let Inst{11-8} = cmode{3-0}; + let Inst{7-6} = 0b01; + let Inst{4} = 0b1; + let Inst{3-0} = imm{3-0}; +} + +class MVE_VORR cmode, dag inOps> + : MVE_bit_cmode<"vorr", suffix, cmode, inOps> { + let Inst{5} = 0b0; +} + +def VORRIZ0v4i32 : MVE_VORR<"i32", 0b0001, (ins MQPR:$Qd_src, expzero00:$imm)>; +def VORRIZ0v8i16 : MVE_VORR<"i16", 0b1001, (ins MQPR:$Qd_src, expzero00:$imm)>; +def VORRIZ8v4i32 : MVE_VORR<"i32", 0b0011, (ins MQPR:$Qd_src, expzero08:$imm)>; +def VORRIZ8v8i16 : MVE_VORR<"i16", 0b1011, (ins MQPR:$Qd_src, expzero08:$imm)>; +def VORRIZ16v4i32 : MVE_VORR<"i32", 0b0101, (ins MQPR:$Qd_src, expzero16:$imm)>; +def VORRIZ24v4i32 : MVE_VORR<"i32", 0b0111, (ins MQPR:$Qd_src, expzero24:$imm)>; + +def VORNIZ0v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero00inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qd)>; +def VORNIZ0v8i16 : MVEAsmPseudo<"vorn${vp}.i16\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero00inv16:$imm, vpred_n:$vp), + (outs MQPR:$Qd)>; +def VORNIZ8v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero08inv32:$imm, 
vpred_n:$vp), + (outs MQPR:$Qd)>; +def VORNIZ8v8i16 : MVEAsmPseudo<"vorn${vp}.i16\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero08inv16:$imm, vpred_n:$vp), + (outs MQPR:$Qd)>; +def VORNIZ16v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero16inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qd)>; +def VORNIZ24v4i32 : MVEAsmPseudo<"vorn${vp}.i32\t$Qd, $imm", + (ins MQPR:$Qd_src, expzero24inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qd)>; + +def VMOVt1 : MVEInstAlias<"vmov${vp}\t$Qd, $Qm", + (MVE_VORR MQPR:$Qd, MQPR:$Qm, MQPR:$Qm, vpred_r:$vp)>; + +class MVE_VBIC cmode, dag inOps> + : MVE_bit_cmode<"vbic", suffix, cmode, inOps> { + let Inst{5} = 0b1; +} + +def VBICIZ0v4i32 : MVE_VBIC<"i32", 0b0001, (ins MQPR:$Qd_src, expzero00:$imm)>; +def VBICIZ0v8i16 : MVE_VBIC<"i16", 0b1001, (ins MQPR:$Qd_src, expzero00:$imm)>; +def VBICIZ8v4i32 : MVE_VBIC<"i32", 0b0011, (ins MQPR:$Qd_src, expzero08:$imm)>; +def VBICIZ8v8i16 : MVE_VBIC<"i16", 0b1011, (ins MQPR:$Qd_src, expzero08:$imm)>; +def VBICIZ16v4i32 : MVE_VBIC<"i32", 0b0101, (ins MQPR:$Qd_src, expzero16:$imm)>; +def VBICIZ24v4i32 : MVE_VBIC<"i32", 0b0111, (ins MQPR:$Qd_src, expzero24:$imm)>; + +def VANDIZ0v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero00inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; +def VANDIZ0v8i16 : MVEAsmPseudo<"vand${vp}.i16\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero00inv16:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; +def VANDIZ8v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero08inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; +def VANDIZ8v8i16 : MVEAsmPseudo<"vand${vp}.i16\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero08inv16:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; +def VANDIZ16v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero16inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; +def VANDIZ24v4i32 : MVEAsmPseudo<"vand${vp}.i32\t$Qda, $imm", + (ins MQPR:$Qda_src, expzero24inv32:$imm, vpred_n:$vp), + (outs MQPR:$Qda)>; 
+ +class VMOVt1 + : VMOV { + bits<4> Qd; + bits<5> Rt; + bits<5> Idx; + + let Inst{31-24} = 0b11101110; + let Inst{23} = U; + let Inst{22-21} = Idx{3-2}; + let Inst{20} = bit_20; + let Inst{19-17} = Qd{2-0}; + let Inst{16} = Idx{4}; + let Inst{15-12} = Rt{3-0}; + let Inst{11-8} = 0b1011; + let Inst{7} = Qd{3}; + let Inst{6-5} = Idx{1-0}; + let Inst{4-0} = 0b10000; +} + +class VMOVt1vectoreg + : VMOVt1; + +class VMOVt1regtovec + : VMOVt1; + +def VMOVt1vtor32 : VMOVt1vectoreg<"32", 0b0, (ins MVEVectorIndex32:$Idx)> { + let Inst{22} = 0b0; + let Inst{6-5} = 0b00; + let Predicates = [HasFPRegsV8_1M]; +} + +def VMOVt1s16 : VMOVt1vectoreg<"s16", 0b0, (ins MVEVectorIndex16:$Idx)> { + let Inst{22} = 0b0; + let Inst{5} = 0b1; +} + +def VMOVt1s8 : VMOVt1vectoreg<"s8", 0b0, (ins MVEVectorIndex8:$Idx)> { + let Inst{22} = 0b1; +} + +def VMOVt1u16 : VMOVt1vectoreg<"u16", 0b1, (ins MVEVectorIndex16:$Idx)> { + let Inst{22} = 0b0; + let Inst{5} = 0b1; +} + +def VMOVt1u8 : VMOVt1vectoreg<"u8", 0b1, (ins MVEVectorIndex8:$Idx)> { + let Inst{22} = 0b1; +} + +def VMOVt18 : VMOVt1regtovec<"8", 0b0, (ins MVEVectorIndex8:$Idx)> { + let Inst{22} = 0b1; +} +def VMOVt116 : VMOVt1regtovec<"16", 0b0, (ins MVEVectorIndex16:$Idx)> { + let Inst{22} = 0b0; + let Inst{5} = 0b1; +} +def VMOVt132 : VMOVt1regtovec<"32", 0b0, (ins MVEVectorIndex32:$Idx)> { + let Inst{22} = 0b0; + let Inst{6-5} = 0b00; + let Predicates = [HasFPRegsV8_1M]; +} + +// end of mve_bit instructions + class t2VPT size, dag iops, string asm, list pattern=[]> : MVE_MI<(outs ), iops, NoItinerary, !strconcat("vpt", "${Mk}", ".", suffix), asm, "", pattern> { bits<3> fc; diff --git a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp --- a/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp +++ b/llvm/lib/Target/ARM/AsmParser/ARMAsmParser.cpp @@ -1153,6 +1153,34 @@ return isImmediate<1, 33>(); } + template + bool isExpImmValue(uint64_t Value) const { + uint64_t mask = (1 << shift) - 1; + if 
((Value & mask) != 0 || (Value >> shift) > 0xff)
+      return false;
+    return true;
+  }
+
+  template<int shift>
+  bool isExpImm() const {
+    if (!isImm()) return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+
+    return isExpImmValue(CE->getValue());
+  }
+
+  template<int shift, int size>
+  bool isInvertedExpImm() const {
+    if (!isImm()) return false;
+    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
+    if (!CE) return false;
+
+    uint64_t OriginalValue = CE->getValue();
+    uint64_t InvertedValue = OriginalValue ^ (((uint64_t)1 << size) - 1);
+    return isExpImmValue(InvertedValue);
+  }
+
   bool isPKHLSLImm() const {
     return isImmediate<0, 32>();
   }
@@ -1926,6 +1954,12 @@
     return VectorIndex.Val < 1;
   }
 
+  template<unsigned size>
+  bool isMVEVectorIndex() const {
+    if (Kind != k_VectorIndex) return false;
+    return VectorIndex.Val < size;
+  }
+
   bool isNEONi8splat() const {
     if (!isImm()) return false;
     const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
@@ -2971,6 +3005,11 @@
     Inst.addOperand(MCOperand::createImm(getVectorIndex()));
   }
 
+  void addMVEVectorIndexOperands(MCInst &Inst, unsigned N) const {
+    assert(N == 1 && "Invalid number of operands!");
+    Inst.addOperand(MCOperand::createImm(getVectorIndex()));
+  }
+
   void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
     assert(N == 1 && "Invalid number of operands!");
     // The immediate encodes the type of constant as well as the value. 
@@ -5945,7 +5984,8 @@ Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && Mnemonic != "sbcs" && Mnemonic != "rscs" && !(hasMVE() && - (Mnemonic.startswith("vmine") || Mnemonic.startswith("vshl")))) { + (Mnemonic.startswith("vmine") || Mnemonic.startswith("vshl") || + Mnemonic.startswith("vmvne") || Mnemonic.startswith("vorne")))) { unsigned CC = ARMCondCodeFromString(Mnemonic.substr(Mnemonic.size()-2)); if (CC != ~0U) { Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); @@ -7724,6 +7764,50 @@ } switch (Inst.getOpcode()) { + case ARM::VORNIZ0v4i32: + case ARM::VORNIZ0v8i16: + case ARM::VORNIZ8v4i32: + case ARM::VORNIZ8v8i16: + case ARM::VORNIZ16v4i32: + case ARM::VORNIZ24v4i32: + case ARM::VANDIZ0v4i32: + case ARM::VANDIZ0v8i16: + case ARM::VANDIZ8v4i32: + case ARM::VANDIZ8v8i16: + case ARM::VANDIZ16v4i32: + case ARM::VANDIZ24v4i32: { + unsigned Opcode; + bool imm16 = false; + switch(Inst.getOpcode()) { + case ARM::VORNIZ0v4i32: Opcode = ARM::VORRIZ0v4i32; break; + case ARM::VORNIZ0v8i16: Opcode = ARM::VORRIZ0v8i16; imm16 = true; break; + case ARM::VORNIZ8v4i32: Opcode = ARM::VORRIZ8v4i32; break; + case ARM::VORNIZ8v8i16: Opcode = ARM::VORRIZ8v8i16; imm16 = true; break; + case ARM::VORNIZ16v4i32: Opcode = ARM::VORRIZ16v4i32; break; + case ARM::VORNIZ24v4i32: Opcode = ARM::VORRIZ24v4i32; break; + case ARM::VANDIZ0v4i32: Opcode = ARM::VBICIZ0v4i32; break; + case ARM::VANDIZ0v8i16: Opcode = ARM::VBICIZ0v8i16; imm16 = true; break; + case ARM::VANDIZ8v4i32: Opcode = ARM::VBICIZ8v4i32; break; + case ARM::VANDIZ8v8i16: Opcode = ARM::VBICIZ8v8i16; imm16 = true; break; + case ARM::VANDIZ16v4i32: Opcode = ARM::VBICIZ16v4i32; break; + case ARM::VANDIZ24v4i32: Opcode = ARM::VBICIZ24v4i32; break; + default: llvm_unreachable("unexpected opcode"); + } + + MCInst TmpInst; + TmpInst.setOpcode(Opcode); + TmpInst.addOperand(Inst.getOperand(0)); + TmpInst.addOperand(Inst.getOperand(1)); + + // invert immediate + unsigned imm = ~Inst.getOperand(2).getImm() & (imm16 ? 
0xffff : 0xffffffff);
+    TmpInst.addOperand(MCOperand::createImm(imm));
+
+    TmpInst.addOperand(Inst.getOperand(3));
+    TmpInst.addOperand(Inst.getOperand(4));
+    Inst = TmpInst;
+    return true;
+  }
   // Alias for alternate form of 'ldr{,b}t Rt, [Rn], #imm' instruction.
   case ARM::LDRT_POST:
   case ARM::LDRBT_POST: {
diff --git a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
--- a/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
+++ b/llvm/lib/Target/ARM/Disassembler/ARMDisassembler.cpp
@@ -507,6 +507,14 @@
 static DecodeStatus DecodeVSTRVLDR_SYSREG(MCInst &Inst, unsigned Insn,
                                           uint64_t Address, const void *Decoder);
+template<int shift>
+static DecodeStatus DecodeExpandedImmOperand(MCInst &Inst, unsigned Val,
+                                             uint64_t Address,
+                                             const void *Decoder);
+template<int size>
+static DecodeStatus DecodeMVEVectorIndexOperand(MCInst &Inst, unsigned Val,
+                                                uint64_t Address,
+                                                const void *Decoder);
 static DecodeStatus DecodeMVEVSHLL(MCInst &Inst, unsigned Insn,
                                    uint64_t Address, const void *Decoder);
 static DecodeStatus DecodeMVEOverlappingLongShift(MCInst &Inst, unsigned Insn,
@@ -6020,6 +6028,37 @@
   return S;
 }
 
+template<int shift>
+static DecodeStatus DecodeExpandedImmOperand(MCInst &Inst, unsigned Val,
+                                             uint64_t Address,
+                                             const void *Decoder) {
+  Val <<= shift;
+
+  Inst.addOperand(MCOperand::createImm(Val));
+  return MCDisassembler::Success;
+}
+
+template<int size>
+static DecodeStatus DecodeMVEVectorIndexOperand(MCInst &Inst, unsigned Val,
+                                                uint64_t Address,
+                                                const void *Decoder) {
+  DecodeStatus S = MCDisassembler::Success;
+  static_assert((size == 1 || size == 2 || size == 4), "Size must be 1, 2 or 4.");
+
+  unsigned Imm = 0;
+  unsigned Beat = ((Val >> 2) & 1) | ((Val >> 3) & 2);
+  if (size == 4)
+    Imm = Val & 3;
+  else if (size == 2)
+    Imm = (Val >> 1) & 1;
+
+  Imm += size * Beat;
+
+  Inst.addOperand(MCOperand::createImm(Imm));
+
+  return S;
+}
+
 static DecodeStatus DecodeMVEVSHLL(MCInst &Inst, unsigned Insn, uint64_t Address,
                                    const void *Decoder) 
{
   DecodeStatus S = MCDisassembler::Success;
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.h
@@ -252,6 +252,8 @@
                     raw_ostream &O);
   void printVPTMask(const MCInst *MI, unsigned OpNum,
                     const MCSubtargetInfo &STI, raw_ostream &O);
+  void printExpandedImmOperand(const MCInst *MI, unsigned OpNum,
+                               const MCSubtargetInfo &STI, raw_ostream &O);
 
 private:
   unsigned DefaultAltIdx = ARM::NoRegAltName;
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMInstPrinter.cpp
@@ -1623,3 +1623,11 @@
   }
 }
 
+void ARMInstPrinter::printExpandedImmOperand(const MCInst *MI, unsigned OpNum,
+                                             const MCSubtargetInfo &STI,
+                                             raw_ostream &O) {
+  uint32_t Val = MI->getOperand(OpNum).getImm();
+  O << markup("<imm:") << "#0x";
+  O.write_hex(Val);
+  O << markup(">");
+}
diff --git a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
--- a/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
+++ b/llvm/lib/Target/ARM/MCTargetDesc/ARMMCCodeEmitter.cpp
@@ -394,6 +394,14 @@
   unsigned getThumbSRImmOpValue(const MCInst &MI, unsigned Op,
                                 SmallVectorImpl<MCFixup> &Fixups,
                                 const MCSubtargetInfo &STI) const;
+  template<unsigned shift, bool invert>
+  unsigned getExpandedImmOpValue(const MCInst &MI, unsigned Op,
+                                 SmallVectorImpl<MCFixup> &Fixups,
+                                 const MCSubtargetInfo &STI) const {
+    static_assert(shift <= 32, "Shift count must be less than or equal to 32.");
+    const MCOperand MO = MI.getOperand(Op);
+    return (invert ? 
(MO.getImm() ^ 0xff) : MO.getImm()) >> shift; + } unsigned NEONThumb2DataIPostEncoder(const MCInst &MI, unsigned EncodedValue, @@ -446,6 +454,10 @@ uint32_t getRestrictedCondCodeOpValue(const MCInst &MI, unsigned OpIdx, SmallVectorImpl &Fixups, const MCSubtargetInfo &STI) const; + template + uint32_t getMVEVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const; }; } // end anonymous namespace @@ -1891,6 +1903,32 @@ return 3; } +template +uint32_t ARMMCCodeEmitter:: +getMVEVectorIndexOpValue(const MCInst &MI, unsigned OpIdx, + SmallVectorImpl &Fixups, + const MCSubtargetInfo &STI) const { + const MCOperand MO = MI.getOperand(OpIdx); + assert(MO.isImm() && "Unexpected operand type!"); + static_assert(size == 1 || size == 2 || size == 4, "Size must be 1, 2, or 4."); + + int Value = MO.getImm(); + + // This is encoded on a beat basis + int Mod = Value % size; + int Div = Value / size; + + int Imm = 0; + if (size == 4) { + Imm = 8; + Imm |= Mod; + } else if (size == 2) { + Imm = 1; + Imm |= Mod << 1; + } + return Imm | (Div & 2) << 3 | (Div & 1) << 2; +} + #include "ARMGenMCCodeEmitter.inc" MCCodeEmitter *llvm::createARMLEMCCodeEmitter(const MCInstrInfo &MCII, diff --git a/llvm/test/MC/ARM/mve-bitops.s b/llvm/test/MC/ARM/mve-bitops.s new file mode 100644 --- /dev/null +++ b/llvm/test/MC/ARM/mve-bitops.s @@ -0,0 +1,469 @@ +# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve -show-encoding < %s \ +# RUN: | FileCheck --check-prefix=CHECK-NOFP %s +# RUN: not llvm-mc -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding < %s 2>%t \ +# RUN: | FileCheck --check-prefix=CHECK %s +# RUN: FileCheck --check-prefix=ERROR < %t %s + +# CHECK: vmov.i32 q0, #0x1bff @ encoding: [0x81,0xef,0x5b,0x0c] +# CHECK-NOFP: vmov.i32 q0, #0x1bff @ encoding: [0x81,0xef,0x5b,0x0c] +vmov.i32 q0, #0x1bff + +# CHECK: vmov.i16 q0, #0x5c @ encoding: [0x85,0xef,0x5c,0x08] +# CHECK-NOFP: vmov.i16 q0, #0x5c @ encoding: 
[0x85,0xef,0x5c,0x08] +vmov.i16 q0, #0x5c + +# CHECK: vmov.i8 q0, #0x4c @ encoding: [0x84,0xef,0x5c,0x0e] +# CHECK-NOFP: vmov.i8 q0, #0x4c @ encoding: [0x84,0xef,0x5c,0x0e] +vmov.i8 q0, #0x4c + +# CHECK: vmov.f32 q0, #-3.625000e+00 @ encoding: [0x80,0xff,0x5d,0x0f] +# CHECK-NOFP: vmov.f32 q0, #-3.625000e+00 @ encoding: [0x80,0xff,0x5d,0x0f] +vmov.f32 q0, #-3.625000e+00 + +# CHECK: vmov.f32 s16, s1 @ encoding: [0xb0,0xee,0x60,0x8a] +# CHECK-NOFP: vmov.f32 s16, s1 @ encoding: [0xb0,0xee,0x60,0x8a] +vmov.f32 s16, s1 + +# CHECK: vmov.f64 d0, d1 @ encoding: [0xb0,0xee,0x41,0x0b] +# CHECK-NOFP: vmov.f64 d0, d1 @ encoding: [0xb0,0xee,0x41,0x0b] +vmov.f64 d0, d1 + +# CHECK: vmov.i64 q0, #0xff0000ffffffffff @ encoding: [0x81,0xff,0x7f,0x0e] +# CHECK-NOFP: vmov.i64 q0, #0xff0000ffffffffff @ encoding: [0x81,0xff,0x7f,0x0e] +vmov.i64 q0, #0xff0000ffffffffff + +# CHECK: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09] +# CHECK-NOFP: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09] +vorr.i16 q0, #0x12 + +# CHECK: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03] +# CHECK-NOFP: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03] +vorr.i32 q0, #0x1200 + +# CHECK: vorr.i16 q0, #0xed @ encoding: [0x86,0xff,0x5d,0x09] +# CHECK-NOFP: vorr.i16 q0, #0xed @ encoding: [0x86,0xff,0x5d,0x09] +vorn.i16 q0, #0xff12 + +# CHECK: vorr.i32 q0, #0xed00 @ encoding: [0x86,0xff,0x5d,0x03] +# CHECK-NOFP: vorr.i32 q0, #0xed00 @ encoding: [0x86,0xff,0x5d,0x03] +vorn.i32 q0, #0xffff12ff + +# CHECK: vorr.i32 q0, #0xed0000 @ encoding: [0x86,0xff,0x5d,0x05] +# CHECK-NOFP: vorr.i32 q0, #0xed0000 @ encoding: [0x86,0xff,0x5d,0x05] +vorn.i32 q0, #0xff12ffff + +# CHECK: vorr.i32 q0, #0xed000000 @ encoding: [0x86,0xff,0x5d,0x07] +# CHECK-NOFP: vorr.i32 q0, #0xed000000 @ encoding: [0x86,0xff,0x5d,0x07] +vorn.i32 q0, #0x12ffffff + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vorn.i16 q0, #0xed00 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand 
for instruction +vorn.i16 q0, #0x00ed + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vorn.i32 q0, #0xed000000 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vorn.i32 q0, #0x00ed0000 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vorn.i32 q0, #0x0000ed00 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vorn.i32 q0, #0x000000ed + +# CHECK: vbic.i16 q0, #0x22 @ encoding: [0x82,0xef,0x72,0x09] +# CHECK-NOFP: vbic.i16 q0, #0x22 @ encoding: [0x82,0xef,0x72,0x09] +vbic.i16 q0, #0x22 + +# CHECK: vbic.i32 q0, #0x1100 @ encoding: [0x81,0xef,0x71,0x03] +# CHECK-NOFP: vbic.i32 q0, #0x1100 @ encoding: [0x81,0xef,0x71,0x03] +vbic.i32 q0, #0x1100 + +# CHECK: vbic.i16 q0, #0xdd @ encoding: [0x85,0xff,0x7d,0x09] +# CHECK-NOFP: vbic.i16 q0, #0xdd @ encoding: [0x85,0xff,0x7d,0x09] +vand.i16 q0, #0xff22 + +# CHECK: vbic.i16 q0, #0xdd00 @ encoding: [0x85,0xff,0x7d,0x0b] +# CHECK-NOFP: vbic.i16 q0, #0xdd00 @ encoding: [0x85,0xff,0x7d,0x0b] +vand.i16 q0, #0x22ff + +# CHECK: vbic.i32 q0, #0xee @ encoding: [0x86,0xff,0x7e,0x01] +# CHECK-NOFP: vbic.i32 q0, #0xee @ encoding: [0x86,0xff,0x7e,0x01] +vand.i32 q0, #0xffffff11 + +# CHECK: vbic.i32 q0, #0xee00 @ encoding: [0x86,0xff,0x7e,0x03] +# CHECK-NOFP: vbic.i32 q0, #0xee00 @ encoding: [0x86,0xff,0x7e,0x03] +vand.i32 q0, #0xffff11ff + +# CHECK: vbic.i32 q0, #0xee0000 @ encoding: [0x86,0xff,0x7e,0x05] +# CHECK-NOFP: vbic.i32 q0, #0xee0000 @ encoding: [0x86,0xff,0x7e,0x05] +vand.i32 q0, #0xff11ffff + +# CHECK: vbic.i32 q0, #0xee000000 @ encoding: [0x86,0xff,0x7e,0x07] +# CHECK-NOFP: vbic.i32 q0, #0xee000000 @ encoding: [0x86,0xff,0x7e,0x07] +vand.i32 q0, #0x11ffffff + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i16 q0, #0xed00 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i16 q0, #0x00ed + +# ERROR: 
[[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i32 q0, #0xed000000 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i32 q0, #0x00ed0000 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i32 q0, #0x0000ed00 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vand.i32 q0, #0x000000ed + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.s8 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.s16 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.s32 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.u8 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.u16 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.u32 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.i8 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.i16 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.i32 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ 
encoding: [0x12,0xef,0x5e,0x01] +vbic.f16 q0, q1, q7 + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOFP: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +vbic.f32 q0, q1, q7 + +# CHECK: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00] +# CHECK-NOFP: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00] +vrev64.8 q0, q4 + +# CHECK: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20] +# CHECK-NOFP: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20] +vrev64.16 q1, q3 + +# CHECK: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00] +# CHECK-NOFP: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00] +vrev64.32 q0, q2 + +# CHECK: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00] +# CHECK-NOFP: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00] +vrev32.8 q0, q1 + +# CHECK: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00] +# CHECK-NOFP: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00] +vrev32.16 q0, q5 + +# CHECK: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01] +# CHECK-NOFP: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01] +vrev16.8 q0, q2 + +# CHECK: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05] +# CHECK-NOFP: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05] +vmvn q0, q2 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.s8 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.s16 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.s32 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.u8 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: 
[0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.u16 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.u32 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.i8 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.i16 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.i32 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.f16 q2, q1, q7 + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOFP: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +veor.f32 q2, q1, q7 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.s8 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.s16 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.s32 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.u8 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.u16 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: 
[0x36,0xef,0x54,0x01] +vorn.u32 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.i8 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.i16 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.i32 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.f16 q0, q3, q2 + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOFP: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +vorn.f32 q0, q3, q2 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.s8 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.s16 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.s32 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.u8 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.u16 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.u32 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.i8 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: 
[0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.i16 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.i32 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.f16 q1, q2, q1 + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOFP: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +vorr.f32 q1, q2, q1 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.s8 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.s16 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.s32 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.u8 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.u16 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.u32 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.i8 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.i16 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: 
[0x04,0xef,0x50,0x01] +vand.i32 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.f16 q0, q2, q0 + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOFP: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +vand.f32 q0, q2, q0 + +# CHECK: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b] +# CHECK-NOFP: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b] +vmov.8 q0[1], r8 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.8 q0[16], r8 + +# CHECK: vmov.16 q0[2], r5 @ encoding: [0x20,0xee,0x30,0x5b] +# CHECK-NOFP: vmov.16 q0[2], r5 @ encoding: [0x20,0xee,0x30,0x5b] +vmov.16 q0[2], r5 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.16 q0[8], r5 + +# CHECK: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb] +# CHECK-NOFP: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb] +vmov.32 q6[3], r11 + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.32 q6[4], r11 + +# CHECK: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b] +# CHECK-NOFP: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b] +vmov.32 r0, q1[0] + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.32 r0, q1[4] + +# CHECK: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b] +# CHECK-NOFP: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b] +vmov.s16 r1, q2[7] + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.s16 r1, q2[8] + +# CHECK: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b] +# CHECK-NOFP: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b] +vmov.s8 r0, q4[13] + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.s8 r0, q4[16] + +# CHECK: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b] +# CHECK-NOFP: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b] 
+vmov.u16 r0, q1[4] + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.u16 r0, q1[8] + +# CHECK: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b] +# CHECK-NOFP: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b] +vmov.u8 r0, q5[7] + +# ERROR: [[@LINE+1]]:{{[0-9]+}}: {{error|note}}: invalid operand for instruction +vmov.u8 r0, q5[16] + +vpste +vmvnt q0, q1 +vmvne q0, q1 +# CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f] +# CHECK-NOFP: vpste @ encoding: [0x71,0xfe,0x4d,0x8f] +# CHECK: vmvnt q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05] +# CHECK-NOFP: vmvnt q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05] +# CHECK: vmvne q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05] +# CHECK-NOFP: vmvne q0, q1 @ encoding: [0xb0,0xff,0xc2,0x05] + +vpste +vornt.s8 q0, q1, q2 +vorne.s8 q0, q1, q2 +# CHECK: vpste @ encoding: [0x71,0xfe,0x4d,0x8f] +# CHECK-NOFP: vpste @ encoding: [0x71,0xfe,0x4d,0x8f] +# CHECK: vornt q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01] +# CHECK-NOFP: vornt q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01] +# CHECK: vorne q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01] +# CHECK-NOFP: vorne q0, q1, q2 @ encoding: [0x32,0xef,0x54,0x01] diff --git a/llvm/test/MC/ARM/mve-vmov-lane.s b/llvm/test/MC/ARM/mve-vmov-lane.s new file mode 100644 --- /dev/null +++ b/llvm/test/MC/ARM/mve-vmov-lane.s @@ -0,0 +1,16 @@ +// RUN: not llvm-mc -triple=thumbv8m.main -mattr=+fp-armv8 -show-encoding < %s 2>%t | FileCheck %s --check-prefix=V80M +// RUN: FileCheck %s < %t --check-prefix=V80M-ERROR +// RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+fp-armv8 -show-encoding < %s 2>%t | FileCheck %s --check-prefix=V81M +// RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+mve -show-encoding < %s 2>%t | FileCheck %s --check-prefix=V81M + +// v8.1M added the Q register syntax for this instruction. The v8.1M spec does +// not list the D register syntax as valid, but we accept it as an extension to +// make porting code from v8.0M to v8.1M easier.
+ +vmov.32 r0, d1[0] +// V80M: vmov.32 r0, d1[0] @ encoding: [0x11,0xee,0x10,0x0b] +// V81M: vmov.32 r0, d1[0] @ encoding: [0x11,0xee,0x10,0x0b] + +vmov.32 r0, q0[2] +// V80M-ERROR: :[[@LINE-1]]:{{[0-9]+}}: error: instruction requires: armv8.1m.main with FP or MVE +// V81M: vmov.32 r0, q0[2] @ encoding: [0x11,0xee,0x10,0x0b] diff --git a/llvm/test/MC/Disassembler/ARM/mve-bitops.txt b/llvm/test/MC/Disassembler/ARM/mve-bitops.txt new file mode 100644 --- /dev/null +++ b/llvm/test/MC/Disassembler/ARM/mve-bitops.txt @@ -0,0 +1,214 @@ +# RUN: not llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -mattr=+mve.fp,+fp64 -show-encoding %s 2> %t | FileCheck %s +# RUN: FileCheck --check-prefix=ERROR < %t %s +# RUN: not llvm-mc -disassemble -triple=thumbv8.1m.main-none-eabi -show-encoding %s &> %t +# RUN: FileCheck --check-prefix=CHECK-NOMVE < %t %s + +# CHECK: vbic q0, q1, q7 @ encoding: [0x12,0xef,0x5e,0x01] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x12,0xef,0x5e,0x01] + +# CHECK: vrev64.8 q0, q4 @ encoding: [0xb0,0xff,0x48,0x00] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xff,0x48,0x00] + +# CHECK: vrev64.16 q1, q3 @ encoding: [0xb4,0xff,0x46,0x20] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb4,0xff,0x46,0x20] + +# CHECK: vrev64.32 q0, q2 @ encoding: [0xb8,0xff,0x44,0x00] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb8,0xff,0x44,0x00] + +# CHECK: vrev32.8 q0, q1 @ encoding: [0xb0,0xff,0xc2,0x00] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xff,0xc2,0x00] + +# CHECK: vrev32.16 q0, q5 @ encoding: [0xb4,0xff,0xca,0x00] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb4,0xff,0xca,0x00] + +# CHECK: vrev16.8 q0, q2 @ encoding: [0xb0,0xff,0x44,0x01] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xff,0x44,0x01] + +# CHECK: vmvn q0, q2 @ encoding: [0xb0,0xff,0xc4,0x05] +# 
CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xff,0xc4,0x05] + +# CHECK: veor q2, q1, q7 @ encoding: [0x02,0xff,0x5e,0x41] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x02,0xff,0x5e,0x41] + +# CHECK: vorn q0, q3, q2 @ encoding: [0x36,0xef,0x54,0x01] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x36,0xef,0x54,0x01] + +# CHECK: vorr q1, q2, q1 @ encoding: [0x24,0xef,0x52,0x21] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x24,0xef,0x52,0x21] + +# CHECK: vand q0, q2, q0 @ encoding: [0x04,0xef,0x50,0x01] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x04,0xef,0x50,0x01] + +# CHECK: vmov.i32 q0, #0x1bff @ encoding: [0x81,0xef,0x5b,0x0c] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x81,0xef,0x5b,0x0c] + +# ERROR: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xc0,0xef,0x50,0x00] + +# CHECK: vmov.i16 q0, #0x5c @ encoding: [0x85,0xef,0x5c,0x08] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x85,0xef,0x5c,0x08] + +# CHECK: vmov.i8 q0, #0x4c @ encoding: [0x84,0xef,0x5c,0x0e] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x84,0xef,0x5c,0x0e] + +# CHECK: vmov.f32 q0, #-3.625000e+00 @ encoding: [0x80,0xff,0x5d,0x0f] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x80,0xff,0x5d,0x0f] + +# CHECK: vmov.f32 q0, #1.000000e+00 @ encoding: [0x87,0xef,0x50,0x0f] +0x87,0xef,0x50,0x0f + +# CHECK: vmov.f32 s16, s1 @ encoding: [0xb0,0xee,0x60,0x8a] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xee,0x60,0x8a] + +# CHECK: vmov.f64 d0, d1 @ encoding: [0xb0,0xee,0x41,0x0b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xb0,0xee,0x41,0x0b] + +# CHECK: vmov.i64 q0, #0xff0000ffffffffff @ encoding: [0x81,0xff,0x7f,0x0e] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding 
+[0x81,0xff,0x7f,0x0e] + +# CHECK: vorr.i16 q0, #0x12 @ encoding: [0x81,0xef,0x52,0x09] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x81,0xef,0x52,0x09] + +# CHECK: vorr.i32 q0, #0x1200 @ encoding: [0x81,0xef,0x52,0x03] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x81,0xef,0x52,0x03] + +# CHECK: vmvn.i32 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x00] +[0x83,0xef,0x75,0x00] + +# CHECK: vbic.i32 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x01] +[0x83,0xef,0x75,0x01] + +# CHECK: vmvn.i32 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x02] +[0x83,0xef,0x75,0x02] + +# CHECK: vbic.i32 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x03] +[0x83,0xef,0x75,0x03] + +# CHECK: vmvn.i32 q0, #0x350000 @ encoding: [0x83,0xef,0x75,0x04] +[0x83,0xef,0x75,0x04] + +# CHECK: vbic.i32 q0, #0x350000 @ encoding: [0x83,0xef,0x75,0x05] +[0x83,0xef,0x75,0x05] + +# CHECK: vmvn.i32 q0, #0x35000000 @ encoding: [0x83,0xef,0x75,0x06] +[0x83,0xef,0x75,0x06] + +# CHECK: vbic.i32 q0, #0x35000000 @ encoding: [0x83,0xef,0x75,0x07] +[0x83,0xef,0x75,0x07] + +# CHECK: vmvn.i16 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x08] +[0x83,0xef,0x75,0x08] + +# CHECK: vbic.i16 q0, #0x35 @ encoding: [0x83,0xef,0x75,0x09] +[0x83,0xef,0x75,0x09] + +# CHECK: vmvn.i16 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x0a] +[0x83,0xef,0x75,0x0a] + +# CHECK: vbic.i16 q0, #0x3500 @ encoding: [0x83,0xef,0x75,0x0b] +[0x83,0xef,0x75,0x0b] + +# CHECK: vmvn.i32 q0, #0x35ff @ encoding: [0x83,0xef,0x75,0x0c] +[0x83,0xef,0x75,0x0c] + +# CHECK: vmvn.i32 q0, #0x35ffff @ encoding: [0x83,0xef,0x75,0x0d] +[0x83,0xef,0x75,0x0d] + +# CHECK: vmov.i64 q0, #0xffff00ff00ff @ encoding: [0x83,0xef,0x75,0x0e] +[0x83,0xef,0x75,0x0e] + +# ERROR: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x83,0xef,0x75,0x0f] + +# CHECK: vmov.8 q0[1], r8 @ encoding: [0x40,0xee,0x30,0x8b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x40,0xee,0x30,0x8b] + +# CHECK: vmov.16 q0[2], r5 @ encoding: 
[0x20,0xee,0x30,0x5b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x20,0xee,0x30,0x5b] + +# CHECK: vmov.32 q6[3], r11 @ encoding: [0x2d,0xee,0x10,0xbb] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x2d,0xee,0x10,0xbb] + +# CHECK: vmov.32 r0, q1[0] @ encoding: [0x12,0xee,0x10,0x0b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x12,0xee,0x10,0x0b] + +# CHECK: vmov.s16 r1, q2[7] @ encoding: [0x35,0xee,0x70,0x1b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x35,0xee,0x70,0x1b] + +# CHECK: vmov.s8 r0, q4[13] @ encoding: [0x79,0xee,0x30,0x0b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x79,0xee,0x30,0x0b] + +# CHECK: vmov.u16 r0, q1[4] @ encoding: [0x93,0xee,0x30,0x0b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x93,0xee,0x30,0x0b] + +# CHECK: vmov.u8 r0, q5[7] @ encoding: [0xfa,0xee,0x70,0x0b] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0xfa,0xee,0x70,0x0b] + +# CHECK: vmov.f16 s7, r8 @ encoding: [0x03,0xee,0x90,0x89] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x03,0xee,0x90,0x89] + +# CHECK: vmov.f16 s10, r5 @ encoding: [0x05,0xee,0x10,0x59] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x05,0xee,0x10,0x59] + +# CHECK: vmov.f16 s10, sp @ encoding: [0x05,0xee,0x10,0xd9] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x05,0xee,0x10,0xd9] + +# CHECK: vmov.f16 s31, r10 @ encoding: [0x0f,0xee,0x90,0xa9] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x0f,0xee,0x90,0xa9] + +# CHECK: vmov.f16 r8, s7 @ encoding: [0x13,0xee,0x90,0x89] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x13,0xee,0x90,0x89] + +# CHECK: vmov.f16 r5, s10 @ encoding: [0x15,0xee,0x10,0x59] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x15,0xee,0x10,0x59] + +# CHECK: 
vmov.f16 sp, s10 @ encoding: [0x15,0xee,0x10,0xd9] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x15,0xee,0x10,0xd9] + +# CHECK: vmov.f16 r10, s31 @ encoding: [0x1f,0xee,0x90,0xa9] +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x1f,0xee,0x90,0xa9] + +# ERROR: [[@LINE+2]]:2: warning: potentially undefined instruction encoding +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x05,0xee,0x10,0xf9] + +# ERROR: [[@LINE+2]]:2: warning: potentially undefined instruction encoding +# CHECK-NOMVE: [[@LINE+1]]:2: warning: invalid instruction encoding +[0x1f,0xee,0x90,0xf9] diff --git a/llvm/test/MC/Disassembler/ARM/mve-vmov-lane.txt b/llvm/test/MC/Disassembler/ARM/mve-vmov-lane.txt new file mode 100644 --- /dev/null +++ b/llvm/test/MC/Disassembler/ARM/mve-vmov-lane.txt @@ -0,0 +1,13 @@ +# RUN: llvm-mc -triple=thumbv8m.main -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=D_REG +# RUN: llvm-mc -triple=thumbv8a -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=D_REG +# RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+fp-armv8 -disassemble < %s | FileCheck %s --check-prefix=Q_REG +# RUN: llvm-mc -triple=thumbv8.1m.main -mattr=+mve -disassemble < %s | FileCheck %s --check-prefix=Q_REG + +# The disassembly for this instruction varies between v8.1M and other +# architectures. In v8.1M (with either scalar floating point, MVE or both), we +# use the Q register syntax, and for all other architectures we use the D +# register syntax. + +[0x11,0xee,0x10,0x0b] +# D_REG: vmov.32 r0, d1[0] +# Q_REG: vmov.32 r0, q0[2]