Index: include/llvm/CodeGen/ValueTypes.h =================================================================== --- include/llvm/CodeGen/ValueTypes.h +++ include/llvm/CodeGen/ValueTypes.h @@ -208,7 +208,7 @@ bool is64BitVector() const { return (SimpleTy == MVT::v8i8 || SimpleTy == MVT::v4i16 || SimpleTy == MVT::v2i32 || SimpleTy == MVT::v1i64 || - SimpleTy == MVT::v2f32); + SimpleTy == MVT::v1f64 || SimpleTy == MVT::v2f32); } /// is128BitVector - Return true if this is a 128-bit vector type. Index: lib/Target/AArch64/AArch64ISelDAGToDAG.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelDAGToDAG.cpp +++ lib/Target/AArch64/AArch64ISelDAGToDAG.cpp @@ -109,6 +109,23 @@ SDNode* Select(SDNode*); private: + /// Select NEON load intrinsics. NumVecs should be 1, 2, 3 or 4. + SDNode *SelectVLD(SDNode *N, unsigned NumVecs, const uint16_t *Opcode); + + /// Select NEON store intrinsics. NumVecs should be 1, 2, 3 or 4. + SDNode *SelectVST(SDNode *N, unsigned NumVecs, const uint16_t *Opcodes); + + // Form pairs of consecutive 64-bit/128-bit registers. + SDNode *createDPairNode(SDValue V0, SDValue V1); + SDNode *createQPairNode(SDValue V0, SDValue V1); + + // Form sequences of 3 consecutive 64-bit/128-bit registers. + SDNode *createDTripleNode(SDValue V0, SDValue V1, SDValue V2); + SDNode *createQTripleNode(SDValue V0, SDValue V1, SDValue V2); + + // Form sequences of 4 consecutive 64-bit/128-bit registers. + SDNode *createDQuadNode(SDValue V0, SDValue V1, SDValue V2, SDValue V3); + SDNode *createQQuadNode(SDValue V0, SDValue V1, SDValue V2, SDValue V3); }; } @@ -390,6 +407,221 @@ &Ops[0], Ops.size()); } +SDNode *AArch64DAGToDAGISel::createDPairNode(SDValue V0, SDValue V1) { + SDLoc dl(V0.getNode()); + SDValue RegClass = + CurDAG->getTargetConstant(AArch64::DPairRegClassID, MVT::i32); + SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32); + SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32); + const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v2i64, + Ops); +} + +SDNode *AArch64DAGToDAGISel::createQPairNode(SDValue V0, SDValue V1) { + SDLoc dl(V0.getNode()); + SDValue RegClass = + CurDAG->getTargetConstant(AArch64::QPairRegClassID, MVT::i32); + SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32); + SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32); + const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1 }; + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v4i64, + Ops); +} + +SDNode *AArch64DAGToDAGISel::createDTripleNode(SDValue V0, SDValue V1, + SDValue V2) { + SDLoc dl(V0.getNode()); + SDValue RegClass = + CurDAG->getTargetConstant(AArch64::DTripleRegClassID, MVT::i32); + SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32); + SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32); + SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::dsub_2, MVT::i32); + const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2 }; + return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped, + Ops); +} + +SDNode *AArch64DAGToDAGISel::createQTripleNode(SDValue V0, SDValue V1, + SDValue V2) { + SDLoc dl(V0.getNode()); + SDValue RegClass = + CurDAG->getTargetConstant(AArch64::QTripleRegClassID, MVT::i32); + SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32); + SDValue SubReg1 = 
CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32);
+  SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::qsub_2, MVT::i32);
+  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2 };
+  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::Untyped,
+                                Ops);
+}
+
+SDNode *AArch64DAGToDAGISel::createDQuadNode(SDValue V0, SDValue V1, SDValue V2,
+                                             SDValue V3) {
+  SDLoc dl(V0.getNode());
+  SDValue RegClass =
+      CurDAG->getTargetConstant(AArch64::DQuadRegClassID, MVT::i32);
+  SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::dsub_0, MVT::i32);
+  SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::dsub_1, MVT::i32);
+  SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::dsub_2, MVT::i32);
+  SDValue SubReg3 = CurDAG->getTargetConstant(AArch64::dsub_3, MVT::i32);
+  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3,
+                          SubReg3 };
+  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v4i64,
+                                Ops);
+}
+
+SDNode *AArch64DAGToDAGISel::createQQuadNode(SDValue V0, SDValue V1, SDValue V2,
+                                             SDValue V3) {
+  SDLoc dl(V0.getNode());
+  SDValue RegClass =
+      CurDAG->getTargetConstant(AArch64::QQuadRegClassID, MVT::i32);
+  SDValue SubReg0 = CurDAG->getTargetConstant(AArch64::qsub_0, MVT::i32);
+  SDValue SubReg1 = CurDAG->getTargetConstant(AArch64::qsub_1, MVT::i32);
+  SDValue SubReg2 = CurDAG->getTargetConstant(AArch64::qsub_2, MVT::i32);
+  SDValue SubReg3 = CurDAG->getTargetConstant(AArch64::qsub_3, MVT::i32);
+  const SDValue Ops[] = { RegClass, V0, SubReg0, V1, SubReg1, V2, SubReg2, V3,
+                          SubReg3 };
+  return CurDAG->getMachineNode(TargetOpcode::REG_SEQUENCE, dl, MVT::v8i64,
+                                Ops);
+}
+
+SDNode *AArch64DAGToDAGISel::SelectVLD(SDNode *N, unsigned NumVecs,
+                                       const uint16_t *Opcodes) {
+  assert(NumVecs >= 1 && NumVecs <= 4 && "VLD NumVecs out-of-range");
+
+  EVT VT = N->getValueType(0);
+  unsigned OpcodeIndex;
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled vector load type");
+  case MVT::v8i8:  OpcodeIndex = 0; break;
+  case MVT::v4i16: OpcodeIndex = 1; break;
+  case MVT::v2f32:
+  case MVT::v2i32: OpcodeIndex = 2; break;
+  case MVT::v1f64:
+  case MVT::v1i64: OpcodeIndex = 3; break;
+  case MVT::v16i8: OpcodeIndex = 4; break;
+  case MVT::v8f16:
+  case MVT::v8i16: OpcodeIndex = 5; break;
+  case MVT::v4f32:
+  case MVT::v4i32: OpcodeIndex = 6; break;
+  case MVT::v2f64:
+  case MVT::v2i64: OpcodeIndex = 7; break;
+  }
+  unsigned Opc = Opcodes[OpcodeIndex];
+
+  SmallVector<SDValue, 2> Ops;
+  Ops.push_back(N->getOperand(2)); // Push back the Memory Address
+  Ops.push_back(N->getOperand(0)); // Push back the Chain
+
+  std::vector<EVT> ResTys;
+  bool is64BitVector = VT.is64BitVector();
+
+  if (NumVecs == 1)
+    ResTys.push_back(VT);
+  else if (NumVecs == 3)
+    ResTys.push_back(MVT::Untyped);
+  else {
+    EVT ResTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64,
+                                 is64BitVector ? NumVecs : NumVecs * 2);
+    ResTys.push_back(ResTy);
+  }
+
+  ResTys.push_back(MVT::Other); // Type of the Chain
+  SDLoc dl(N);
+  SDNode *VLd = CurDAG->getMachineNode(Opc, dl, ResTys, Ops);
+
+  // Transfer memoperands.
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+  cast<MachineSDNode>(VLd)->setMemRefs(MemOp, MemOp + 1);
+
+  if (NumVecs == 1)
+    return VLd;
+
+  // If NumVecs > 1, the result is a super-register containing 2-4 consecutive
+  // vector registers.
+  SDValue SuperReg = SDValue(VLd, 0);
+
+  unsigned Sub0 = is64BitVector ?
AArch64::dsub_0 : AArch64::qsub_0;
+  for (unsigned Vec = 0; Vec < NumVecs; ++Vec)
+    ReplaceUses(SDValue(N, Vec),
+                CurDAG->getTargetExtractSubreg(Sub0 + Vec, dl, VT, SuperReg));
+
+  // Update users of the Chain.
+  ReplaceUses(SDValue(N, NumVecs), SDValue(VLd, 1));
+
+  return NULL;
+}
+
+SDNode *AArch64DAGToDAGISel::SelectVST(SDNode *N, unsigned NumVecs,
+                                       const uint16_t *Opcodes) {
+  assert(NumVecs >= 1 && NumVecs <= 4 && "VST NumVecs out-of-range");
+  SDLoc dl(N);
+
+  MachineSDNode::mmo_iterator MemOp = MF->allocateMemRefsArray(1);
+  MemOp[0] = cast<MemIntrinsicSDNode>(N)->getMemOperand();
+
+  unsigned Vec0Idx = 3;
+  EVT VT = N->getOperand(Vec0Idx).getValueType();
+  unsigned OpcodeIndex;
+  switch (VT.getSimpleVT().SimpleTy) {
+  default: llvm_unreachable("unhandled vector store type");
+  case MVT::v8i8:  OpcodeIndex = 0; break;
+  case MVT::v4i16: OpcodeIndex = 1; break;
+  case MVT::v2f32:
+  case MVT::v2i32: OpcodeIndex = 2; break;
+  case MVT::v1f64:
+  case MVT::v1i64: OpcodeIndex = 3; break;
+  case MVT::v16i8: OpcodeIndex = 4; break;
+  case MVT::v8f16:
+  case MVT::v8i16: OpcodeIndex = 5; break;
+  case MVT::v4f32:
+  case MVT::v4i32: OpcodeIndex = 6; break;
+  case MVT::v2f64:
+  case MVT::v2i64: OpcodeIndex = 7; break;
+  }
+  unsigned Opc = Opcodes[OpcodeIndex];
+
+  std::vector<EVT> ResTys;
+  ResTys.push_back(MVT::Other); // Type for the Chain
+
+  SmallVector<SDValue, 2> Ops;
+  Ops.push_back(N->getOperand(2)); // Push back the Memory Address
+
+  bool is64BitVector = VT.is64BitVector();
+
+  SDValue V0 = N->getOperand(Vec0Idx + 0);
+  SDValue SrcReg;
+  if (NumVecs == 1)
+    SrcReg = V0;
+  else {
+    SDValue V1 = N->getOperand(Vec0Idx + 1);
+    if (NumVecs == 2)
+      SrcReg = is64BitVector ? SDValue(createDPairNode(V0, V1), 0)
+                             : SDValue(createQPairNode(V0, V1), 0);
+    else {
+      SDValue V2 = N->getOperand(Vec0Idx + 2);
+      if (NumVecs == 3)
+        SrcReg = is64BitVector ? SDValue(createDTripleNode(V0, V1, V2), 0)
+                               : SDValue(createQTripleNode(V0, V1, V2), 0);
+      else {
+        SDValue V3 = N->getOperand(Vec0Idx + 3);
+        SrcReg = is64BitVector ? SDValue(createDQuadNode(V0, V1, V2, V3), 0)
+                               : SDValue(createQQuadNode(V0, V1, V2, V3), 0);
+      }
+    }
+  }
+  Ops.push_back(SrcReg);
+
+  // Push back the Chain.
+  Ops.push_back(N->getOperand(0));
+
+  // Transfer memoperands.
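+  // Attaching the MachineMemOperand taken from the MemIntrinsicSDNode keeps
+  // the size and alignment of the access visible to later passes such as
+  // scheduling and alias analysis.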
+ SDNode *VSt = CurDAG->getMachineNode(Opc, dl, ResTys, Ops); + cast(VSt)->setMemRefs(MemOp, MemOp + 1); + + return VSt; +} + SDNode *AArch64DAGToDAGISel::Select(SDNode *Node) { // Dump information about the Node being selected DEBUG(dbgs() << "Selecting: "; Node->dump(CurDAG); dbgs() << "\n"); @@ -536,6 +768,72 @@ Node = ResNode; break; } + case ISD::INTRINSIC_VOID: + case ISD::INTRINSIC_W_CHAIN: { + unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); + switch (IntNo) { + default: + break; + + case Intrinsic::arm_neon_vld1: { + static const uint16_t Opcodes[] = { AArch64::LD1_8B, AArch64::LD1_4H, + AArch64::LD1_2S, AArch64::LD1_1D, + AArch64::LD1_16B, AArch64::LD1_8H, + AArch64::LD1_4S, AArch64::LD1_2D }; + return SelectVLD(Node, 1, Opcodes); + } + case Intrinsic::arm_neon_vld2: { + static const uint16_t Opcodes[] = { AArch64::LD2_8B, AArch64::LD2_4H, + AArch64::LD2_2S, AArch64::LD1_2V_1D, + AArch64::LD2_16B, AArch64::LD2_8H, + AArch64::LD2_4S, AArch64::LD2_2D }; + return SelectVLD(Node, 2, Opcodes); + } + case Intrinsic::arm_neon_vld3: { + static const uint16_t Opcodes[] = { AArch64::LD3_8B, AArch64::LD3_4H, + AArch64::LD3_2S, AArch64::LD1_3V_1D, + AArch64::LD3_16B, AArch64::LD3_8H, + AArch64::LD3_4S, AArch64::LD3_2D }; + return SelectVLD(Node, 3, Opcodes); + } + case Intrinsic::arm_neon_vld4: { + static const uint16_t Opcodes[] = { AArch64::LD4_8B, AArch64::LD4_4H, + AArch64::LD4_2S, AArch64::LD1_4V_1D, + AArch64::LD4_16B, AArch64::LD4_8H, + AArch64::LD4_4S, AArch64::LD4_2D }; + return SelectVLD(Node, 4, Opcodes); + } + case Intrinsic::arm_neon_vst1: { + static const uint16_t Opcodes[] = { AArch64::ST1_8B, AArch64::ST1_4H, + AArch64::ST1_2S, AArch64::ST1_1D, + AArch64::ST1_16B, AArch64::ST1_8H, + AArch64::ST1_4S, AArch64::ST1_2D }; + return SelectVST(Node, 1, Opcodes); + } + case Intrinsic::arm_neon_vst2: { + static const uint16_t Opcodes[] = { AArch64::ST2_8B, AArch64::ST2_4H, + AArch64::ST2_2S, AArch64::ST1_2V_1D, + AArch64::ST2_16B, AArch64::ST2_8H, + AArch64::ST2_4S, AArch64::ST2_2D }; + return SelectVST(Node, 2, Opcodes); + } + case Intrinsic::arm_neon_vst3: { + static const uint16_t Opcodes[] = { AArch64::ST3_8B, AArch64::ST3_4H, + AArch64::ST3_2S, AArch64::ST1_3V_1D, + AArch64::ST3_16B, AArch64::ST3_8H, + AArch64::ST3_4S, AArch64::ST3_2D }; + return SelectVST(Node, 3, Opcodes); + } + case Intrinsic::arm_neon_vst4: { + static const uint16_t Opcodes[] = { AArch64::ST4_8B, AArch64::ST4_4H, + AArch64::ST4_2S, AArch64::ST1_4V_1D, + AArch64::ST4_16B, AArch64::ST4_8H, + AArch64::ST4_4S, AArch64::ST4_2D }; + return SelectVST(Node, 4, Opcodes); + } + } + break; + } default: break; // Let generic code handle it } Index: lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.h +++ lib/Target/AArch64/AArch64ISelLowering.h @@ -281,6 +281,9 @@ std::pair getRegForInlineAsmConstraint(const std::string &Constraint, MVT VT) const; + + virtual bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I, + unsigned Intrinsic) const; private: const InstrItineraryData *Itins; Index: lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- lib/Target/AArch64/AArch64ISelLowering.cpp +++ lib/Target/AArch64/AArch64ISelLowering.cpp @@ -3681,3 +3681,57 @@ // constraint into a member of a register class. return TargetLowering::getRegForInlineAsmConstraint(Constraint, VT); } + +/// Represent NEON load and store intrinsics as MemIntrinsicNodes. 
+/// The associated MachineMemOperands record the alignment specified
+/// in the intrinsic calls.
+bool AArch64TargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
+                                               const CallInst &I,
+                                               unsigned Intrinsic) const {
+  switch (Intrinsic) {
+  case Intrinsic::arm_neon_vld1:
+  case Intrinsic::arm_neon_vld2:
+  case Intrinsic::arm_neon_vld3:
+  case Intrinsic::arm_neon_vld4: {
+    Info.opc = ISD::INTRINSIC_W_CHAIN;
+    // Conservatively set memVT to the entire set of vectors loaded.
+    uint64_t NumElts = getDataLayout()->getTypeAllocSize(I.getType()) / 8;
+    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+    Info.ptrVal = I.getArgOperand(0);
+    Info.offset = 0;
+    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+    Info.vol = false; // volatile loads with NEON intrinsics not supported
+    Info.readMem = true;
+    Info.writeMem = false;
+    return true;
+  }
+  case Intrinsic::arm_neon_vst1:
+  case Intrinsic::arm_neon_vst2:
+  case Intrinsic::arm_neon_vst3:
+  case Intrinsic::arm_neon_vst4: {
+    Info.opc = ISD::INTRINSIC_VOID;
+    // Conservatively set memVT to the entire set of vectors stored.
+    unsigned NumElts = 0;
+    for (unsigned ArgI = 1, ArgE = I.getNumArgOperands(); ArgI < ArgE; ++ArgI) {
+      Type *ArgTy = I.getArgOperand(ArgI)->getType();
+      if (!ArgTy->isVectorTy())
+        break;
+      NumElts += getDataLayout()->getTypeAllocSize(ArgTy) / 8;
+    }
+    Info.memVT = EVT::getVectorVT(I.getType()->getContext(), MVT::i64, NumElts);
+    Info.ptrVal = I.getArgOperand(0);
+    Info.offset = 0;
+    Value *AlignArg = I.getArgOperand(I.getNumArgOperands() - 1);
+    Info.align = cast<ConstantInt>(AlignArg)->getZExtValue();
+    Info.vol = false; // volatile stores with NEON intrinsics not supported
+    Info.readMem = false;
+    Info.writeMem = true;
+    return true;
+  }
+  default:
+    break;
+  }
+
+  return false;
+}
Index: lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- lib/Target/AArch64/AArch64InstrFormats.td
+++ lib/Target/AArch64/AArch64InstrFormats.td
@@ -1194,5 +1194,23 @@
   // Inherit Rd in 4-0
 }
 
+// Format AdvSIMD vector load/store multiple N-element structure
+class NeonI_LdStMult<bit q, bit l, bits<4> opcode, bits<2> size,
+                     dag outs, dag ins, string asmstr,
+                     list<dag> patterns, InstrItinClass itin>
+  : A64InstRtn<outs, ins, asmstr, patterns, itin>
+{
+  let Inst{31} = 0b0;
+  let Inst{30} = q;
+  let Inst{29-23} = 0b0011000;
+  let Inst{22} = l;
+  let Inst{21-16} = 0b000000;
+  let Inst{15-12} = opcode;
+  let Inst{11-10} = size;
+
+  // Inherit Rn in 9-5
+  // Inherit Rt in 4-0
+}
+
 }
Index: lib/Target/AArch64/AArch64InstrNEON.td
===================================================================
--- lib/Target/AArch64/AArch64InstrNEON.td
+++ lib/Target/AArch64/AArch64InstrNEON.td
@@ -2982,6 +2982,163 @@
 
 // End of implementation for instruction class (3V Diff)
 
+// Vector load/store multiple N-element structure (class SIMD lselem)
+
+// ld1: load multiple 1-element structure to 1/2/3/4 registers.
+// ld2/ld3/ld4: load multiple N-element structure to N registers (N = 2, 3, 4).
+// The structure consists of a sequence of sets of N values.
+// The first element of the structure is placed in the first lane of
+// the first vector, the second element in the first lane of
+// the second vector, and so on.
+// E.g. LD1_3V_2S will load 32-bit elements {A, B, C, D, E, F} sequentially
+// into the list of three 64-bit vectors {BA, DC, FE}.
+// E.g. LD3_2S will de-interleave 32-bit elements {A, B, C, D, E, F} into the
+// list of three 64-bit vectors {DA, EB, FC}.
+// Store instructions store multiple N-element structure to N registers like load. + +multiclass VectorList_operands { + def _asmoperand : AsmOperandClass { + let Name = PREFIX # LAYOUT # Count; + let RenderMethod = "addVectorListOperands"; + let PredicateMethod = + "isVectorList"; + let ParserMethod = "ParseVectorList"; + } + + def _operand : RegisterOperand"> { + let ParserMatchClass = + !cast(PREFIX # LAYOUT # "_asmoperand"); + } +} + +multiclass VectorList_BHSD { + defm 8B : VectorList_operands; + defm 4H : VectorList_operands; + defm 2S : VectorList_operands; + defm 1D : VectorList_operands; + defm 16B : VectorList_operands; + defm 8H : VectorList_operands; + defm 4S : VectorList_operands; + defm 2D : VectorList_operands; +} + +// Vector List Operand with 1/2/3/4 registers +defm VOne : VectorList_BHSD<"VOne", 1, FPR64, FPR128>; +defm VPair : VectorList_BHSD<"VPair", 2, DPair, QPair>; +defm VTriple : VectorList_BHSD<"VTriple", 3, DTriple, QTriple>; +defm VQuad : VectorList_BHSD<"VQuad", 4, DQuad, QQuad>; + +class NeonI_LDVList opcode, bits<2> size, + RegisterOperand VecList, string asmop> + : NeonI_LdStMult { + let mayLoad = 1; +} + +multiclass LDVList_BHSD opcode, string List, string asmop> { + def _8B : NeonI_LDVList<0, opcode, 0b00, + !cast(List # "8B_operand"), asmop>; + + def _4H : NeonI_LDVList<0, opcode, 0b01, + !cast(List # "4H_operand"), asmop>; + + def _2S : NeonI_LDVList<0, opcode, 0b10, + !cast(List # "2S_operand"), asmop>; + + def _16B : NeonI_LDVList<1, opcode, 0b00, + !cast(List # "16B_operand"), asmop>; + + def _8H : NeonI_LDVList<1, opcode, 0b01, + !cast(List # "8H_operand"), asmop>; + + def _4S : NeonI_LDVList<1, opcode, 0b10, + !cast(List # "4S_operand"), asmop>; + + def _2D : NeonI_LDVList<1, opcode, 0b11, + !cast(List # "2D_operand"), asmop>; +} + +// Load multiple N-element structure to N consecutive registers (N = 1,2,3,4) +defm LD1 : LDVList_BHSD<0b0111, "VOne", "ld1">; +def LD1_1D : NeonI_LDVList<0, 0b0111, 0b11, VOne1D_operand, "ld1">; + +defm LD2 : LDVList_BHSD<0b1000, "VPair", "ld2">; + +defm LD3 : LDVList_BHSD<0b0100, "VTriple", "ld3">; + +defm LD4 : LDVList_BHSD<0b0000, "VQuad", "ld4">; + +// Load multiple 1-element structure to N consecutive registers (N = 2,3,4) +defm LD1_2V : LDVList_BHSD<0b1010, "VPair", "ld1">; +def LD1_2V_1D : NeonI_LDVList<0, 0b1010, 0b11, VPair1D_operand, "ld1">; + +defm LD1_3V : LDVList_BHSD<0b0110, "VTriple", "ld1">; +def LD1_3V_1D : NeonI_LDVList<0, 0b0110, 0b11, VTriple1D_operand, "ld1">; + +defm LD1_4V : LDVList_BHSD<0b0010, "VQuad", "ld1">; +def LD1_4V_1D : NeonI_LDVList<0, 0b0010, 0b11, VQuad1D_operand, "ld1">; + +class NeonI_STVList opcode, bits<2> size, + RegisterOperand VecList, string asmop> + : NeonI_LdStMult { + let mayStore = 1; +} + +multiclass STVList_BHSD opcode, string List, string asmop> { + def _8B : NeonI_STVList<0, opcode, 0b00, + !cast(List # "8B_operand"), asmop>; + + def _4H : NeonI_STVList<0, opcode, 0b01, + !cast(List # "4H_operand"), asmop>; + + def _2S : NeonI_STVList<0, opcode, 0b10, + !cast(List # "2S_operand"), asmop>; + + def _16B : NeonI_STVList<1, opcode, 0b00, + !cast(List # "16B_operand"), asmop>; + + def _8H : NeonI_STVList<1, opcode, 0b01, + !cast(List # "8H_operand"), asmop>; + + def _4S : NeonI_STVList<1, opcode, 0b10, + !cast(List # "4S_operand"), asmop>; + + def _2D : NeonI_STVList<1, opcode, 0b11, + !cast(List # "2D_operand"), asmop>; +} + +// Store multiple N-element structures from N registers (N = 1,2,3,4) +defm ST1 : STVList_BHSD<0b0111, "VOne", "st1">; +def ST1_1D : NeonI_STVList<0, 
0b0111, 0b11, VOne1D_operand, "st1">; + +defm ST2 : STVList_BHSD<0b1000, "VPair", "st2">; + +defm ST3 : STVList_BHSD<0b0100, "VTriple", "st3">; + +defm ST4 : STVList_BHSD<0b0000, "VQuad", "st4">; + +// Store multiple 1-element structures from N consecutive registers (N = 2,3,4) +defm ST1_2V : STVList_BHSD<0b1010, "VPair", "st1">; +def ST1_2V_1D : NeonI_STVList<0, 0b1010, 0b11, VPair1D_operand, "st1">; + +defm ST1_3V : STVList_BHSD<0b0110, "VTriple", "st1">; +def ST1_3V_1D : NeonI_STVList<0, 0b0110, 0b11, VTriple1D_operand, "st1">; + +defm ST1_4V : STVList_BHSD<0b0010, "VQuad", "st1">; +def ST1_4V_1D : NeonI_STVList<0, 0b0010, 0b11, VQuad1D_operand, "st1">; + +// End of vector load/store multiple N-element structure(class SIMD lselem) + // Scalar Arithmetic class NeonI_Scalar3Same_D_size opcode, string asmop> Index: lib/Target/AArch64/AArch64RegisterInfo.td =================================================================== --- lib/Target/AArch64/AArch64RegisterInfo.td +++ lib/Target/AArch64/AArch64RegisterInfo.td @@ -17,6 +17,20 @@ def sub_32 : SubRegIndex<32>; def sub_16 : SubRegIndex<16>; def sub_8 : SubRegIndex<8>; + +// Note: Code depends on these having consecutive numbers. +def qqsub : SubRegIndex<256, 256>; + +def qsub_0 : SubRegIndex<128>; +def qsub_1 : SubRegIndex<128, 128>; +def qsub_2 : ComposedSubRegIndex; +def qsub_3 : ComposedSubRegIndex; + +def dsub_0 : SubRegIndex<64>; +def dsub_1 : SubRegIndex<64, 64>; +def dsub_2 : ComposedSubRegIndex; +def dsub_3 : ComposedSubRegIndex; +def dsub_4 : ComposedSubRegIndex; } // Registers are identified with 5-bit ID numbers. @@ -188,3 +202,57 @@ let CopyCost = -1; let isAllocatable = 0; } + +//===----------------------------------------------------------------------===// +// Consecutive vector registers +//===----------------------------------------------------------------------===// +// 2 Consecutive 64-bit registers: D0_D1, D1_D2, ..., D30_D31 +def Tuples2D : RegisterTuples<[dsub_0, dsub_1], + [(rotl FPR64, 0), (rotl FPR64, 1)]>; + +// 3 Consecutive 64-bit registers: D0_D1_D2, ..., D31_D0_D1 +def Tuples3D : RegisterTuples<[dsub_0, dsub_1, dsub_2], + [(rotl FPR64, 0), (rotl FPR64, 1), + (rotl FPR64, 2)]>; + +// 4 Consecutive 64-bit registers: D0_D1_D2_D3, ..., D31_D0_D1_D2 +def Tuples4D : RegisterTuples<[dsub_0, dsub_1, dsub_2, dsub_3], + [(rotl FPR64, 0), (rotl FPR64, 1), + (rotl FPR64, 2), (rotl FPR64, 3)]>; + +// 2 Consecutive 64-bit registers: Q0_Q1, Q1_Q2, ..., Q30_Q31 +def Tuples2Q : RegisterTuples<[qsub_0, qsub_1], + [(rotl FPR128, 0), + (rotl FPR128, 1)]>; + +// 3 Consecutive 64-bit registers: Q0_Q1_Q2, ..., Q31_Q0_Q1 +def Tuples3Q : RegisterTuples<[qsub_0, qsub_1, qsub_2], + [(rotl FPR128, 0), + (rotl FPR128, 1), + (rotl FPR128, 2)]>; + +// 4 Consecutive 64-bit registers: Q0_Q1_Q2_Q3, ..., Q31_Q0_Q1_Q2 +def Tuples4Q : RegisterTuples<[qsub_0, qsub_1, qsub_2, qsub_3], + [(rotl FPR128, 0), + (rotl FPR128, 1), + (rotl FPR128, 2), + (rotl FPR128, 3)]>; + +// Following are super register classes to model 2/3/4 consecutive +// 64-bit/128-bit registers. + +def DPair : RegisterClass<"AArch64", [v2i64], 64, (add Tuples2D)>; + +def DTriple : RegisterClass<"AArch64", [untyped], 64, (add Tuples3D)> { + let Size = 192; // 3 x 64 bits, we have no predefined type of that size. 
+} + +def DQuad : RegisterClass<"AArch64", [v4i64], 64, (add Tuples4D)>; + +def QPair : RegisterClass<"AArch64", [v4i64], 128, (add Tuples2Q)>; + +def QTriple : RegisterClass<"AArch64", [untyped], 128, (add Tuples3Q)> { + let Size = 384; // 3 x 128 bits, we have no predefined type of that size. +} + +def QQuad : RegisterClass<"AArch64", [v8i64], 128, (add Tuples4Q)>; Index: lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp =================================================================== --- lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp +++ lib/Target/AArch64/AsmParser/AArch64AsmParser.cpp @@ -127,6 +127,11 @@ OperandMatchResultTy ParseSysRegOperand(SmallVectorImpl &Operands); + bool TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc, StringRef &Layout, + SMLoc &LayoutLoc); + + OperandMatchResultTy ParseVectorList(SmallVectorImpl &); + bool validateInstruction(MCInst &Inst, const SmallVectorImpl &Operands); @@ -154,6 +159,7 @@ k_Immediate, // Including expressions referencing symbols k_Register, k_ShiftExtend, + k_VectorList, // A sequential list of 1 to 4 registers. k_SysReg, // The register operand of MRS and MSR instructions k_Token, // The mnemonic; other raw tokens the auto-generated k_WrappedRegister // Load/store exclusive permit a wrapped register. @@ -189,6 +195,13 @@ bool ImplicitAmount; }; + // A vector register list is a sequential list of 1 to 4 registers. + struct VectorListOp { + unsigned RegNum; + unsigned Count; + A64Layout::VectorLayout Layout; + }; + struct SysRegOp { const char *Data; unsigned Length; @@ -206,6 +219,7 @@ struct ImmOp Imm; struct RegOp Reg; struct ShiftExtendOp ShiftExtend; + struct VectorListOp VectorList; struct SysRegOp SysReg; struct TokOp Tok; }; @@ -717,6 +731,12 @@ return ShiftExtend.Amount == 8 || ShiftExtend.Amount == 16; } + template + bool isVectorList() const { + return Kind == k_VectorList && VectorList.Layout == Layout && + VectorList.Count == Count; + } + template bool isSImm7Scaled() const { if (!isImm()) return false; @@ -837,6 +857,18 @@ return Op; } + static AArch64Operand *CreateVectorList(unsigned RegNum, unsigned Count, + A64Layout::VectorLayout Layout, + SMLoc S, SMLoc E) { + AArch64Operand *Op = new AArch64Operand(k_VectorList, S, E); + Op->VectorList.RegNum = RegNum; + Op->VectorList.Count = Count; + Op->VectorList.Layout = Layout; + Op->StartLoc = S; + Op->EndLoc = E; + return Op; + } + static AArch64Operand *CreateToken(StringRef Str, SMLoc S) { AArch64Operand *Op = new AArch64Operand(k_Token, S, S); Op->Tok.Data = Str.data(); @@ -1184,6 +1216,11 @@ } Inst.addOperand(MCOperand::CreateImm(Imm)); } + + void addVectorListOperands(MCInst &Inst, unsigned N) const { + assert(N == 1 && "Invalid number of operands!"); + Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); + } }; } // end anonymous namespace. @@ -1223,7 +1260,6 @@ else return MatchOperand_Success; } - // ... or it might be a symbolish thing } // Fall through @@ -1267,7 +1303,7 @@ return ParseOperand(Operands, Mnemonic); } // The following will likely be useful later, but not in very early cases - case AsmToken::LCurly: // Weird SIMD lists + case AsmToken::LCurly: // SIMD vector list is not parsed here llvm_unreachable("Don't know how to deal with '{' in operand"); return MatchOperand_ParseFail; } @@ -1890,6 +1926,131 @@ return MatchOperand_Success; } +/// Try to parse a vector register token, If it is a vector register, +/// the token is eaten and return true. Otherwise return false. 
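+/// For example, parsing "v0.8b" yields the matching FPR64 register in RegNum
+/// and sets Layout to ".8b" (layout strings carry the leading dot, matching
+/// A64StringToVectorLayout).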
+bool AArch64AsmParser::TryParseVector(uint32_t &RegNum, SMLoc &RegEndLoc,
+                                      StringRef &Layout, SMLoc &LayoutLoc) {
+  bool IsVector = true;
+
+  if (!IdentifyRegister(RegNum, RegEndLoc, Layout, LayoutLoc))
+    IsVector = false;
+
+  if (!AArch64MCRegisterClasses[AArch64::FPR64RegClassID].contains(RegNum) &&
+      !AArch64MCRegisterClasses[AArch64::FPR128RegClassID].contains(RegNum))
+    IsVector = false;
+
+  if (Layout.size() == 0)
+    IsVector = false;
+
+  if (!IsVector)
+    Error(Parser.getTok().getLoc(), "expected vector type register");
+
+  Parser.Lex(); // Eat this token.
+  return IsVector;
+}
+
+// A vector list contains 1-4 consecutive registers.
+// A list with more than one vector can be written in two ways:
+//   (1) {Vn.layout, Vn+1.layout, ... , Vm.layout}
+//   (2) {Vn.layout - Vm.layout}
+AArch64AsmParser::OperandMatchResultTy AArch64AsmParser::ParseVectorList(
+    SmallVectorImpl<MCParsedAsmOperand *> &Operands) {
+  if (Parser.getTok().isNot(AsmToken::LCurly)) {
+    Error(Parser.getTok().getLoc(), "'{' expected");
+    return MatchOperand_ParseFail;
+  }
+  SMLoc SLoc = Parser.getTok().getLoc();
+  Parser.Lex(); // Eat '{' token.
+
+  unsigned Reg, Count = 1;
+  StringRef LayoutStr;
+  SMLoc RegEndLoc, LayoutLoc;
+  if (!TryParseVector(Reg, RegEndLoc, LayoutStr, LayoutLoc))
+    return MatchOperand_ParseFail;
+
+  if (Parser.getTok().is(AsmToken::Minus)) {
+    Parser.Lex(); // Eat the minus.
+
+    unsigned Reg2;
+    StringRef LayoutStr2;
+    SMLoc RegEndLoc2, LayoutLoc2;
+    SMLoc RegLoc2 = Parser.getTok().getLoc();
+
+    if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+      return MatchOperand_ParseFail;
+
+    unsigned Space = (Reg < Reg2) ? (Reg2 - Reg) : (Reg2 + 32 - Reg);
+    // The distance between the endpoints of a range must be 1, 2 or 3,
+    // and both registers must have the same layout.
+    if (Space == 0 || Space > 3 || LayoutStr != LayoutStr2) {
+      Error(RegLoc2, "invalid operand for instruction");
+      return MatchOperand_ParseFail;
+    }
+
+    Count += Space;
+  } else {
+    unsigned LastReg = Reg;
+    while (Parser.getTok().is(AsmToken::Comma)) {
+      Parser.Lex(); // Eat the comma.
+      unsigned Reg2;
+      StringRef LayoutStr2;
+      SMLoc RegEndLoc2, LayoutLoc2;
+      SMLoc RegLoc2 = Parser.getTok().getLoc();
+      if (!TryParseVector(Reg2, RegEndLoc2, LayoutStr2, LayoutLoc2))
+        return MatchOperand_ParseFail;
+
+      unsigned Space = (LastReg < Reg2) ? (Reg2 - LastReg)
+                                        : (Reg2 + 32 - LastReg);
+      Count++;
+
+      // Consecutive registers in the list must be exactly one apart and
+      // share the same layout, and the total count must not be greater
+      // than 4.
+      if (Space != 1 || LayoutStr != LayoutStr2 || Count > 4) {
+        Error(RegLoc2, "invalid operand for instruction");
+        return MatchOperand_ParseFail;
+      }
+
+      LastReg = Reg2;
+    }
+  }
+
+  if (Parser.getTok().isNot(AsmToken::RCurly)) {
+    Error(Parser.getTok().getLoc(), "'}' expected");
+    return MatchOperand_ParseFail;
+  }
+  SMLoc ELoc = Parser.getTok().getLoc();
+  Parser.Lex(); // Eat '}' token.
+
+  A64Layout::VectorLayout Layout = A64StringToVectorLayout(LayoutStr);
+  if (Count > 1) {
+    // Use a super register containing 2-4 consecutive vector registers.
+    unsigned SupRegID;
+    bool IsVec64 = Layout < A64Layout::_16B;
+    switch (Count) {
+    case 2:
+      SupRegID = IsVec64 ? AArch64::DPairRegClassID : AArch64::QPairRegClassID;
+      break;
+    case 3:
+      SupRegID =
+          IsVec64 ? AArch64::DTripleRegClassID : AArch64::QTripleRegClassID;
+      break;
+    case 4:
+      SupRegID = IsVec64 ? AArch64::DQuadRegClassID : AArch64::QQuadRegClassID;
+      break;
+    }
+    unsigned Sub0 = IsVec64 ?
AArch64::dsub_0 : AArch64::qsub_0; + const MCRegisterInfo *MRI = getContext().getRegisterInfo(); + Reg = MRI->getMatchingSuperReg(Reg, Sub0, + &AArch64MCRegisterClasses[SupRegID]); + } + Operands.push_back( + AArch64Operand::CreateVectorList(Reg, Count, Layout, SLoc, ELoc)); + + return MatchOperand_Success; +} + // FIXME: We would really like to be able to tablegen'erate this. bool AArch64AsmParser:: validateInstruction(MCInst &Inst, Index: lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp =================================================================== --- lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp +++ lib/Target/AArch64/Disassembler/AArch64Disassembler.cpp @@ -361,6 +361,72 @@ return DecodeFPR128RegisterClass(Inst, RegNo, Address, Decoder); } +static DecodeStatus DecodeDPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::DPairRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeQPairRegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::QPairRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeDTripleRegisterClass(llvm::MCInst &Inst, + unsigned RegNo, uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::DTripleRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeQTripleRegisterClass(llvm::MCInst &Inst, + unsigned RegNo, uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::QTripleRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeDQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::DQuadRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + +static DecodeStatus DecodeQQuadRegisterClass(llvm::MCInst &Inst, unsigned RegNo, + uint64_t Address, + const void *Decoder) { + if (RegNo > 31) + return MCDisassembler::Fail; + + uint16_t Register = getReg(Decoder, AArch64::QQuadRegClassID, RegNo); + Inst.addOperand(MCOperand::CreateReg(Register)); + return MCDisassembler::Success; +} + static DecodeStatus DecodeAddrRegExtendOperand(llvm::MCInst &Inst, unsigned OptionHiS, uint64_t Address, Index: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h =================================================================== --- lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h +++ lib/Target/AArch64/InstPrinter/AArch64InstPrinter.h @@ -174,6 +174,9 @@ raw_ostream &O); void printNeonUImm64MaskOperand(const MCInst *MI, unsigned OpNum, raw_ostream &O); + + template + void printVectorList(const MCInst *MI, unsigned OpNum, raw_ostream &O); }; } Index: lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp =================================================================== --- 
lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp +++ lib/Target/AArch64/InstPrinter/AArch64InstPrinter.cpp @@ -507,3 +507,33 @@ O << "#0x"; O.write_hex(Mask); } + +// If Count > 1, there are two valid kinds of vector list: +// (1) {Vn.layout, Vn+1.layout, ... , Vm.layout} +// (2) {Vn.layout - Vm.layout} +// We choose the first kind as output. +template +void AArch64InstPrinter::printVectorList(const MCInst *MI, unsigned OpNum, + raw_ostream &O) { + assert(Count >= 1 && Count <= 4 && "Invalid Number of Vectors"); + + unsigned Reg = MI->getOperand(OpNum).getReg(); + std::string LayoutStr = A64VectorLayoutToString(Layout); + O << "{"; + if (Count > 1) { // Print sub registers separately + bool IsVec64 = (Layout < A64Layout::_16B) ? true : false; + unsigned SubRegIdx = IsVec64 ? AArch64::dsub_0 : AArch64::qsub_0; + for (unsigned I = 0; I < Count; I++) { + std::string Name = getRegisterName(MRI.getSubReg(Reg, SubRegIdx++)); + Name[0] = 'v'; + O << Name << LayoutStr; + if (I != Count - 1) + O << ", "; + } + } else { // Print the register directly when NumVecs is 1. + std::string Name = getRegisterName(Reg); + Name[0] = 'v'; + O << Name << LayoutStr; + } + O << "}"; +} Index: lib/Target/AArch64/Utils/AArch64BaseInfo.h =================================================================== --- lib/Target/AArch64/Utils/AArch64BaseInfo.h +++ lib/Target/AArch64/Utils/AArch64BaseInfo.h @@ -306,6 +306,50 @@ }; } +namespace A64Layout { + enum VectorLayout { + Invalid = -1, + _8B, + _4H, + _2S, + _1D, + + _16B, + _8H, + _4S, + _2D + }; +} + +inline static const char * +A64VectorLayoutToString(A64Layout::VectorLayout Layout) { + switch (Layout) { + case A64Layout::_8B: return ".8b"; + case A64Layout::_4H: return ".4h"; + case A64Layout::_2S: return ".2s"; + case A64Layout::_1D: return ".1d"; + case A64Layout::_16B: return ".16b"; + case A64Layout::_8H: return ".8h"; + case A64Layout::_4S: return ".4s"; + case A64Layout::_2D: return ".2d"; + default: llvm_unreachable("Unknown Vector Layout"); + } +} + +inline static A64Layout::VectorLayout +A64StringToVectorLayout(StringRef LayoutStr) { + return StringSwitch(LayoutStr) + .Case(".8b", A64Layout::_8B) + .Case(".4h", A64Layout::_4H) + .Case(".2s", A64Layout::_2S) + .Case(".1d", A64Layout::_1D) + .Case(".16b", A64Layout::_16B) + .Case(".8h", A64Layout::_8H) + .Case(".4s", A64Layout::_4S) + .Case(".2d", A64Layout::_2D) + .Default(A64Layout::Invalid); +} + namespace A64SysReg { enum SysRegROValues { MDCCSR_EL0 = 0x9808, // 10 011 0000 0001 000 Index: test/MC/AArch64/neon-diagnostics.s =================================================================== --- test/MC/AArch64/neon-diagnostics.s +++ test/MC/AArch64/neon-diagnostics.s @@ -3880,3 +3880,286 @@ // CHECK-ERROR: error: invalid operand for instruction // CHECK-ERROR: frsqrts d8, s22, d18 // CHECK-ERROR: ^ + +//------------------------------------------------------------------------------ +// Vector load/store multiple N-element structure (class SIMD lselem) +//------------------------------------------------------------------------------ + ld1 {x3}, [x2] + ld1 {v32.16b}, [x0] + ld1 {v15.8h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {x3}, [x2] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {v32.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld1 {v15.8h}, [x32] +// CHECK-ERROR: ^ + + ld1 {v0.16b, x1}, [x0] + ld1 {v15.8h, v16.4h}, [x15] + ld1 {v0.8b, v2.8b}, [x0] + ld1 
{v15.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld1 {v0.16b, x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld1 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld1 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld1 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + ld2 {v0.16b, x1}, [x0] + ld2 {v15.8h, v16.4h}, [x15] + ld2 {v0.8b, v2.8b}, [x0] + ld2 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld2 {v0.16b, x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + ld2 {v0.16b-x1}, [x0] + ld2 {v15.8h-v16.4h}, [x15] + ld2 {v0.2d-v2.2d}, [x0] + ld2 {v15.4h-v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld2 {v0.16b-x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v15.8h-v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld2 {v15.4h-v16.4h}, [x32] +// CHECK-ERROR: ^ + + ld3 {v0.16b, v1.16b, x2}, [x0] + ld3 {v15.8h, v16.8h, v17.4h}, [x15] + ld3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] + ld3 {v0.8b, v2.8b, v3.8b}, [x0] + ld3 {v15.4h, v16.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld3 {v0.16b, v1.16b, x2}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v15.8h, v16.8h, v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v0.8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v15.4h, v16.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + ld3 {v0.16b-x2}, [x0] + ld3 {v15.8h-v17.4h}, [x15] + ld3 {v31.4s-v2.4s}, [sp] + ld3 {v31.2s-v1.2s}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld3 {v0.16b-x2}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld3 {v31.2s-v1.2s}, [x32] +// CHECK-ERROR: ^ + + ld4 {v0.16b, v1.16b, v2.8b, x3}, [x0] + ld4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] + ld4 {v2.8b, v3.8b, v4.8b}, [x0] + ld4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] + ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x32] +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v0.16b, v1.16b, v2.8b, x3}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid 
operand for instruction +// CHECK-ERROR: ld4 {v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x32] +// CHECK-ERROR: ^ + + ld4 {v0.16b-v3}, [x0] + ld4 {v15.8h-v18.4h}, [x15] + ld4 {v0.8b-v2.8b}, [x0] + ld4 {v31.2s-v3.2s}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: ld4 {v0.16b-v3}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v15.8h-v18.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v0.8b-v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: ld4 {v31.2s-v3.2s}, [x32] +// CHECK-ERROR: ^ + + st1 {x3}, [x2] + st1 {v32.16b}, [x0] + st1 {v15.8h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {x3}, [x2] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {v32.16b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st1 {v15.8h}, [x32] +// CHECK-ERROR: ^ + + st1 {v0.16b, x1}, [x0] + st1 {v15.8h, v16.4h}, [x15] + st1 {v0.8b, v2.8b}, [x0] + st1 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st1 {v0.16b, x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st1 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st1 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st1 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + st2 {v0.16b, x1}, [x0] + st2 {v15.8h, v16.4h}, [x15] + st2 {v0.8b, v2.8b}, [x0] + st2 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st2 {v0.16b, x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v15.8h, v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v0.8b, v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v15.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + st2 {v0.16b-x1}, [x0] + st2 {v15.8h-v16.4h}, [x15] + st2 {v0.2d-v2.2d}, [x0] + st2 {v15.4h-v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st2 {v0.16b-x1}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v15.8h-v16.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v0.2d-v2.2d}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st2 {v15.4h-v16.4h}, [x32] +// CHECK-ERROR: ^ + + st3 {v0.16b, v1.16b, x2}, [x0] + st3 {v15.8h, v16.8h, v17.4h}, [x15] + st3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] + st3 {v0.8b, v2.8b, v3.8b}, [x0] + st3 {v15.4h, v16.4h, v16.4h}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st3 {v0.16b, v1.16b, x2}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v15.8h, v16.8h, v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: expected vector type 
register +// CHECK-ERROR: st3 {v0.8b, v1,8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v0.8b, v2.8b, v3.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v15.4h, v16.4h, v16.4h}, [x32] +// CHECK-ERROR: ^ + + st3 {v0.16b-x2}, [x0] + st3 {v15.8h-v17.4h}, [x15] + st3 {v31.4s-v2.4s}, [sp] + st3 {v31.2s-v1.2s}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st3 {v0.16b-x2}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v15.8h-v17.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v31.4s-v2.4s}, [sp] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st3 {v31.2s-v1.2s}, [x32] +// CHECK-ERROR: ^ + + st4 {v0.16b, v1.16b, v2.8b, x3}, [x0] + st4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] + st4 {v2.8b, v3.8b, v4.8b}, [x0] + st4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] + st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x32] +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v0.16b, v1.16b, v2.8b, x3}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v15.8h, v16.8h, v17.4h, v18.8h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v0.8b, v2.8b, v3.8b, v4.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v15.4h, v16.4h, v17.4h, v18.4h}, [x32] +// CHECK-ERROR: ^ + + st4 {v0.16b-v3}, [x0] + st4 {v15.8h-v18.4h}, [x15] + st4 {v0.8b-v2.8b}, [x0] + st4 {v31.2s-v3.2s}, [x32] +// CHECK-ERROR: error: expected vector type register +// CHECK-ERROR: st4 {v0.16b-v3}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v15.8h-v18.4h}, [x15] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v0.8b-v2.8b}, [x0] +// CHECK-ERROR: ^ +// CHECK-ERROR: error: invalid operand for instruction +// CHECK-ERROR: st4 {v31.2s-v3.2s}, [x32] +// CHECK-ERROR: ^ Index: tools/clang/include/clang/Basic/arm_neon.td =================================================================== --- tools/clang/include/clang/Basic/arm_neon.td +++ tools/clang/include/clang/Basic/arm_neon.td @@ -498,6 +498,26 @@ let isA64 = 1 in { //////////////////////////////////////////////////////////////////////////////// +// Load/Store +// With additional QUl, Ql, Qd type. 
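+// In the prototype strings, the first character is the return type and the
+// rest are the argument types: 'd' is the default vector type, '2'/'3'/'4'
+// the corresponding structure-of-N-vectors types, 'v' void, 'c' a pointer to
+// const element data, and 'p' a plain element pointer (see clang's
+// NeonEmitter for the full modifier list).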
+def LD1 : WInst<"vld1", "dc", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def LD2 : WInst<"vld2", "2c", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def LD3 : WInst<"vld3", "3c", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def LD4 : WInst<"vld4", "4c", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def ST1 : WInst<"vst1", "vpd", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def ST2 : WInst<"vst2", "vp2", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def ST3 : WInst<"vst3", "vp3", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; +def ST4 : WInst<"vst4", "vp4", + "QUcQUsQUiQUlQcQsQiQlQhQfQdQPcQPsUcUsUiUlcsilhfdPcPs">; + +//////////////////////////////////////////////////////////////////////////////// // Addition // With additional Qd type. def ADD : IOpInst<"vadd", "ddd", "csilfUcUsUiUlQcQsQiQlQfQUcQUsQUiQUlQd", OP_ADD>; Index: tools/clang/lib/CodeGen/CGBuiltin.cpp =================================================================== --- tools/clang/lib/CodeGen/CGBuiltin.cpp +++ tools/clang/lib/CodeGen/CGBuiltin.cpp @@ -2345,6 +2345,40 @@ return EmitNeonCall(F, Ops, "vcvt_n"); } + // Load/Store + case AArch64::BI__builtin_neon_vld1_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1_v, E); + case AArch64::BI__builtin_neon_vld1q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld1q_v, E); + case AArch64::BI__builtin_neon_vld2_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2_v, E); + case AArch64::BI__builtin_neon_vld2q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld2q_v, E); + case AArch64::BI__builtin_neon_vld3_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3_v, E); + case AArch64::BI__builtin_neon_vld3q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld3q_v, E); + case AArch64::BI__builtin_neon_vld4_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4_v, E); + case AArch64::BI__builtin_neon_vld4q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vld4q_v, E); + case AArch64::BI__builtin_neon_vst1_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1_v, E); + case AArch64::BI__builtin_neon_vst1q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst1q_v, E); + case AArch64::BI__builtin_neon_vst2_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2_v, E); + case AArch64::BI__builtin_neon_vst2q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst2q_v, E); + case AArch64::BI__builtin_neon_vst3_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3_v, E); + case AArch64::BI__builtin_neon_vst3q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst3q_v, E); + case AArch64::BI__builtin_neon_vst4_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4_v, E); + case AArch64::BI__builtin_neon_vst4q_v: + return EmitARMBuiltinExpr(ARM::BI__builtin_neon_vst4q_v, E); + // AArch64-only builtins case AArch64::BI__builtin_neon_vfma_lane_v: case AArch64::BI__builtin_neon_vfmaq_laneq_v: { Index: tools/clang/test/CodeGen/aarch64-neon-intrinsics.c =================================================================== --- tools/clang/test/CodeGen/aarch64-neon-intrinsics.c +++ tools/clang/test/CodeGen/aarch64-neon-intrinsics.c @@ -5657,3 +5657,1355 @@ // CHECK: frsqrte {{d[0-9]+}}, {{d[0-9]+}} return vrsqrted_f64(a); } + +uint8x16_t test_vld1q_u8(uint8_t const *a) { + // CHECK: test_vld1q_u8 + return vld1q_u8(a); + // CHECK: ld1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +uint16x8_t test_vld1q_u16(uint16_t const *a) { + // CHECK: test_vld1q_u16 + return 
vld1q_u16(a); + // CHECK: ld1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +uint32x4_t test_vld1q_u32(uint32_t const *a) { + // CHECK: test_vld1q_u32 + return vld1q_u32(a); + // CHECK: ld1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +uint64x2_t test_vld1q_u64(uint64_t const *a) { + // CHECK: test_vld1q_u64 + return vld1q_u64(a); + // CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +int8x16_t test_vld1q_s8(int8_t const *a) { + // CHECK: test_vld1q_s8 + return vld1q_s8(a); + // CHECK: ld1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +int16x8_t test_vld1q_s16(int16_t const *a) { + // CHECK: test_vld1q_s16 + return vld1q_s16(a); + // CHECK: ld1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +int32x4_t test_vld1q_s32(int32_t const *a) { + // CHECK: test_vld1q_s32 + return vld1q_s32(a); + // CHECK: ld1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +int64x2_t test_vld1q_s64(int64_t const *a) { + // CHECK: test_vld1q_s64 + return vld1q_s64(a); + // CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +float16x8_t test_vld1q_f16(float16_t const *a) { + // CHECK: test_vld1q_f16 + return vld1q_f16(a); + // CHECK: ld1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +float32x4_t test_vld1q_f32(float32_t const *a) { + // CHECK: test_vld1q_f32 + return vld1q_f32(a); + // CHECK: ld1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +float64x2_t test_vld1q_f64(float64_t const *a) { + // CHECK: test_vld1q_f64 + return vld1q_f64(a); + // CHECK: ld1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +poly8x16_t test_vld1q_p8(poly8_t const *a) { + // CHECK: test_vld1q_p8 + return vld1q_p8(a); + // CHECK: ld1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +poly16x8_t test_vld1q_p16(poly16_t const *a) { + // CHECK: test_vld1q_p16 + return vld1q_p16(a); + // CHECK: ld1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +uint8x8_t test_vld1_u8(uint8_t const *a) { + // CHECK: test_vld1_u8 + return vld1_u8(a); + // CHECK: ld1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] +} + +uint16x4_t test_vld1_u16(uint16_t const *a) { + // CHECK: test_vld1_u16 + return vld1_u16(a); + // CHECK: ld1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +uint32x2_t test_vld1_u32(uint32_t const *a) { + // CHECK: test_vld1_u32 + return vld1_u32(a); + // CHECK: ld1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] +} + +uint64x1_t test_vld1_u64(uint64_t const *a) { + // CHECK: test_vld1_u64 + return vld1_u64(a); + // CHECK: ld1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] +} + +int8x8_t test_vld1_s8(int8_t const *a) { + // CHECK: test_vld1_s8 + return vld1_s8(a); + // CHECK: ld1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] +} + +int16x4_t test_vld1_s16(int16_t const *a) { + // CHECK: test_vld1_s16 + return vld1_s16(a); + // CHECK: ld1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +int32x2_t test_vld1_s32(int32_t const *a) { + // CHECK: test_vld1_s32 + return vld1_s32(a); + // CHECK: ld1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] +} + +int64x1_t test_vld1_s64(int64_t const *a) { + // CHECK: test_vld1_s64 + return vld1_s64(a); + // CHECK: ld1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] +} + +float16x4_t test_vld1_f16(float16_t const *a) { + // CHECK: test_vld1_f16 + return vld1_f16(a); + // CHECK: ld1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +float32x2_t test_vld1_f32(float32_t const *a) { + // CHECK: test_vld1_f32 + return vld1_f32(a); + // CHECK: ld1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] +} + +float64x1_t test_vld1_f64(float64_t const *a) { + // CHECK: test_vld1_f64 + return vld1_f64(a); + // CHECK: ld1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] +} + +poly8x8_t test_vld1_p8(poly8_t const *a) { + // CHECK: test_vld1_p8 + return vld1_p8(a); + // CHECK: ld1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] +} + 
+poly16x4_t test_vld1_p16(poly16_t const *a) { + // CHECK: test_vld1_p16 + return vld1_p16(a); + // CHECK: ld1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +uint8x16x2_t test_vld2q_u8(uint8_t const *a) { + // CHECK: test_vld2q_u8 + return vld2q_u8(a); + // CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +uint16x8x2_t test_vld2q_u16(uint16_t const *a) { + // CHECK: test_vld2q_u16 + return vld2q_u16(a); + // CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +uint32x4x2_t test_vld2q_u32(uint32_t const *a) { + // CHECK: test_vld2q_u32 + return vld2q_u32(a); + // CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +uint64x2x2_t test_vld2q_u64(uint64_t const *a) { + // CHECK: test_vld2q_u64 + return vld2q_u64(a); + // CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +int8x16x2_t test_vld2q_s8(int8_t const *a) { + // CHECK: test_vld2q_s8 + return vld2q_s8(a); + // CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +int16x8x2_t test_vld2q_s16(int16_t const *a) { + // CHECK: test_vld2q_s16 + return vld2q_s16(a); + // CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +int32x4x2_t test_vld2q_s32(int32_t const *a) { + // CHECK: test_vld2q_s32 + return vld2q_s32(a); + // CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +int64x2x2_t test_vld2q_s64(int64_t const *a) { + // CHECK: test_vld2q_s64 + return vld2q_s64(a); + // CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +float16x8x2_t test_vld2q_f16(float16_t const *a) { + // CHECK: test_vld2q_f16 + return vld2q_f16(a); + // CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +float32x4x2_t test_vld2q_f32(float32_t const *a) { + // CHECK: test_vld2q_f32 + return vld2q_f32(a); + // CHECK: ld2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}] +} + +float64x2x2_t test_vld2q_f64(float64_t const *a) { + // CHECK: test_vld2q_f64 + return vld2q_f64(a); + // CHECK: ld2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}] +} + +poly8x16x2_t test_vld2q_p8(poly8_t const *a) { + // CHECK: test_vld2q_p8 + return vld2q_p8(a); + // CHECK: ld2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}] +} + +poly16x8x2_t test_vld2q_p16(poly16_t const *a) { + // CHECK: test_vld2q_p16 + return vld2q_p16(a); + // CHECK: ld2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}] +} + +uint8x8x2_t test_vld2_u8(uint8_t const *a) { + // CHECK: test_vld2_u8 + return vld2_u8(a); + // CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] +} + +uint16x4x2_t test_vld2_u16(uint16_t const *a) { + // CHECK: test_vld2_u16 + return vld2_u16(a); + // CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +uint32x2x2_t test_vld2_u32(uint32_t const *a) { + // CHECK: test_vld2_u32 + return vld2_u32(a); + // CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] +} + +uint64x1x2_t test_vld2_u64(uint64_t const *a) { + // CHECK: test_vld2_u64 + return vld2_u64(a); + // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}] +} + +int8x8x2_t test_vld2_s8(int8_t const *a) { + // CHECK: test_vld2_s8 + return vld2_s8(a); + // CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}] +} + +int16x4x2_t test_vld2_s16(int16_t const *a) { + // CHECK: test_vld2_s16 + return vld2_s16(a); + // CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}] +} + +int32x2x2_t test_vld2_s32(int32_t const *a) { + // CHECK: test_vld2_s32 + return vld2_s32(a); + // CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}] +} + 
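+// The 64-bit single-lane (.1d) cases such as test_vld2_s64 and test_vld2_f64
+// below expect "ld1" rather than "ld2": there is no ld2/ld3/ld4 with a .1d
+// arrangement, so SelectVLD lowers v1i64/v1f64 multi-vector loads to the
+// LD1_2V_1D/LD1_3V_1D/LD1_4V_1D forms.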
+int64x1x2_t test_vld2_s64(int64_t const *a) {
+  // CHECK: test_vld2_s64
+  return vld2_s64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+float16x4x2_t test_vld2_f16(float16_t const *a) {
+  // CHECK: test_vld2_f16
+  return vld2_f16(a);
+  // CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+float32x2x2_t test_vld2_f32(float32_t const *a) {
+  // CHECK: test_vld2_f32
+  return vld2_f32(a);
+  // CHECK: ld2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+float64x1x2_t test_vld2_f64(float64_t const *a) {
+  // CHECK: test_vld2_f64
+  return vld2_f64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+poly8x8x2_t test_vld2_p8(poly8_t const *a) {
+  // CHECK: test_vld2_p8
+  return vld2_p8(a);
+  // CHECK: ld2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+poly16x4x2_t test_vld2_p16(poly16_t const *a) {
+  // CHECK: test_vld2_p16
+  return vld2_p16(a);
+  // CHECK: ld2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+uint8x16x3_t test_vld3q_u8(uint8_t const *a) {
+  // CHECK: test_vld3q_u8
+  return vld3q_u8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+uint16x8x3_t test_vld3q_u16(uint16_t const *a) {
+  // CHECK: test_vld3q_u16
+  return vld3q_u16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+uint32x4x3_t test_vld3q_u32(uint32_t const *a) {
+  // CHECK: test_vld3q_u32
+  return vld3q_u32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+uint64x2x3_t test_vld3q_u64(uint64_t const *a) {
+  // CHECK: test_vld3q_u64
+  return vld3q_u64(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+int8x16x3_t test_vld3q_s8(int8_t const *a) {
+  // CHECK: test_vld3q_s8
+  return vld3q_s8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+int16x8x3_t test_vld3q_s16(int16_t const *a) {
+  // CHECK: test_vld3q_s16
+  return vld3q_s16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+int32x4x3_t test_vld3q_s32(int32_t const *a) {
+  // CHECK: test_vld3q_s32
+  return vld3q_s32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+int64x2x3_t test_vld3q_s64(int64_t const *a) {
+  // CHECK: test_vld3q_s64
+  return vld3q_s64(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+float16x8x3_t test_vld3q_f16(float16_t const *a) {
+  // CHECK: test_vld3q_f16
+  return vld3q_f16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+float32x4x3_t test_vld3q_f32(float32_t const *a) {
+  // CHECK: test_vld3q_f32
+  return vld3q_f32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+float64x2x3_t test_vld3q_f64(float64_t const *a) {
+  // CHECK: test_vld3q_f64
+  return vld3q_f64(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+poly8x16x3_t test_vld3q_p8(poly8_t const *a) {
+  // CHECK: test_vld3q_p8
+  return vld3q_p8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+poly16x8x3_t test_vld3q_p16(poly16_t const *a) {
+  // CHECK: test_vld3q_p16
+  return vld3q_p16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+uint8x8x3_t test_vld3_u8(uint8_t const *a) {
+  // CHECK: test_vld3_u8
+  return vld3_u8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+uint16x4x3_t test_vld3_u16(uint16_t const *a) {
+  // CHECK: test_vld3_u16
+  return vld3_u16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+uint32x2x3_t test_vld3_u32(uint32_t const *a) {
+  // CHECK: test_vld3_u32
+  return vld3_u32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+uint64x1x3_t test_vld3_u64(uint64_t const *a) {
+  // CHECK: test_vld3_u64
+  return vld3_u64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+int8x8x3_t test_vld3_s8(int8_t const *a) {
+  // CHECK: test_vld3_s8
+  return vld3_s8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+int16x4x3_t test_vld3_s16(int16_t const *a) {
+  // CHECK: test_vld3_s16
+  return vld3_s16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+int32x2x3_t test_vld3_s32(int32_t const *a) {
+  // CHECK: test_vld3_s32
+  return vld3_s32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+int64x1x3_t test_vld3_s64(int64_t const *a) {
+  // CHECK: test_vld3_s64
+  return vld3_s64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+float16x4x3_t test_vld3_f16(float16_t const *a) {
+  // CHECK: test_vld3_f16
+  return vld3_f16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+float32x2x3_t test_vld3_f32(float32_t const *a) {
+  // CHECK: test_vld3_f32
+  return vld3_f32(a);
+  // CHECK: ld3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+float64x1x3_t test_vld3_f64(float64_t const *a) {
+  // CHECK: test_vld3_f64
+  return vld3_f64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+poly8x8x3_t test_vld3_p8(poly8_t const *a) {
+  // CHECK: test_vld3_p8
+  return vld3_p8(a);
+  // CHECK: ld3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+poly16x4x3_t test_vld3_p16(poly16_t const *a) {
+  // CHECK: test_vld3_p16
+  return vld3_p16(a);
+  // CHECK: ld3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+uint8x16x4_t test_vld4q_u8(uint8_t const *a) {
+  // CHECK: test_vld4q_u8
+  return vld4q_u8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+uint16x8x4_t test_vld4q_u16(uint16_t const *a) {
+  // CHECK: test_vld4q_u16
+  return vld4q_u16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+uint32x4x4_t test_vld4q_u32(uint32_t const *a) {
+  // CHECK: test_vld4q_u32
+  return vld4q_u32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+uint64x2x4_t test_vld4q_u64(uint64_t const *a) {
+  // CHECK: test_vld4q_u64
+  return vld4q_u64(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+int8x16x4_t test_vld4q_s8(int8_t const *a) {
+  // CHECK: test_vld4q_s8
+  return vld4q_s8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+int16x8x4_t test_vld4q_s16(int16_t const *a) {
+  // CHECK: test_vld4q_s16
+  return vld4q_s16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+int32x4x4_t test_vld4q_s32(int32_t const *a) {
+  // CHECK: test_vld4q_s32
+  return vld4q_s32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+int64x2x4_t test_vld4q_s64(int64_t const *a) {
+  // CHECK: test_vld4q_s64
+  return vld4q_s64(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+float16x8x4_t test_vld4q_f16(float16_t const *a) {
+  // CHECK: test_vld4q_f16
+  return vld4q_f16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+float32x4x4_t test_vld4q_f32(float32_t const *a) {
+  // CHECK: test_vld4q_f32
+  return vld4q_f32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+float64x2x4_t test_vld4q_f64(float64_t const *a) {
+  // CHECK: test_vld4q_f64
+  return vld4q_f64(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+poly8x16x4_t test_vld4q_p8(poly8_t const *a) {
+  // CHECK: test_vld4q_p8
+  return vld4q_p8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+poly16x8x4_t test_vld4q_p16(poly16_t const *a) {
+  // CHECK: test_vld4q_p16
+  return vld4q_p16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+uint8x8x4_t test_vld4_u8(uint8_t const *a) {
+  // CHECK: test_vld4_u8
+  return vld4_u8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+uint16x4x4_t test_vld4_u16(uint16_t const *a) {
+  // CHECK: test_vld4_u16
+  return vld4_u16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+uint32x2x4_t test_vld4_u32(uint32_t const *a) {
+  // CHECK: test_vld4_u32
+  return vld4_u32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+uint64x1x4_t test_vld4_u64(uint64_t const *a) {
+  // CHECK: test_vld4_u64
+  return vld4_u64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+int8x8x4_t test_vld4_s8(int8_t const *a) {
+  // CHECK: test_vld4_s8
+  return vld4_s8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+int16x4x4_t test_vld4_s16(int16_t const *a) {
+  // CHECK: test_vld4_s16
+  return vld4_s16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+int32x2x4_t test_vld4_s32(int32_t const *a) {
+  // CHECK: test_vld4_s32
+  return vld4_s32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+int64x1x4_t test_vld4_s64(int64_t const *a) {
+  // CHECK: test_vld4_s64
+  return vld4_s64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+float16x4x4_t test_vld4_f16(float16_t const *a) {
+  // CHECK: test_vld4_f16
+  return vld4_f16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+float32x2x4_t test_vld4_f32(float32_t const *a) {
+  // CHECK: test_vld4_f32
+  return vld4_f32(a);
+  // CHECK: ld4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+float64x1x4_t test_vld4_f64(float64_t const *a) {
+  // CHECK: test_vld4_f64
+  return vld4_f64(a);
+  // CHECK: ld1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+poly8x8x4_t test_vld4_p8(poly8_t const *a) {
+  // CHECK: test_vld4_p8
+  return vld4_p8(a);
+  // CHECK: ld4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+poly16x4x4_t test_vld4_p16(poly16_t const *a) {
+  // CHECK: test_vld4_p16
+  return vld4_p16(a);
+  // CHECK: ld4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_u8(uint8_t *a, uint8x16_t b) {
+  // CHECK: test_vst1q_u8
+  vst1q_u8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_u16(uint16_t *a, uint16x8_t b) {
+  // CHECK: test_vst1q_u16
+  vst1q_u16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_u32(uint32_t *a, uint32x4_t b) {
+  // CHECK: test_vst1q_u32
+  vst1q_u32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_u64(uint64_t *a, uint64x2_t b) {
+  // CHECK: test_vst1q_u64
+  vst1q_u64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_s8(int8_t *a, int8x16_t b) {
+  // CHECK: test_vst1q_s8
+  vst1q_s8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_s16(int16_t *a, int16x8_t b) {
+  // CHECK: test_vst1q_s16
+  vst1q_s16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_s32(int32_t *a, int32x4_t b) {
+  // CHECK: test_vst1q_s32
+  vst1q_s32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_s64(int64_t *a, int64x2_t b) {
+  // CHECK: test_vst1q_s64
+  vst1q_s64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_f16(float16_t *a, float16x8_t b) {
+  // CHECK: test_vst1q_f16
+  vst1q_f16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_f32(float32_t *a, float32x4_t b) {
+  // CHECK: test_vst1q_f32
+  vst1q_f32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_f64(float64_t *a, float64x2_t b) {
+  // CHECK: test_vst1q_f64
+  vst1q_f64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_p8(poly8_t *a, poly8x16_t b) {
+  // CHECK: test_vst1q_p8
+  vst1q_p8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1q_p16(poly16_t *a, poly16x8_t b) {
+  // CHECK: test_vst1q_p16
+  vst1q_p16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_u8(uint8_t *a, uint8x8_t b) {
+  // CHECK: test_vst1_u8
+  vst1_u8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_u16(uint16_t *a, uint16x4_t b) {
+  // CHECK: test_vst1_u16
+  vst1_u16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_u32(uint32_t *a, uint32x2_t b) {
+  // CHECK: test_vst1_u32
+  vst1_u32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_u64(uint64_t *a, uint64x1_t b) {
+  // CHECK: test_vst1_u64
+  vst1_u64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_s8(int8_t *a, int8x8_t b) {
+  // CHECK: test_vst1_s8
+  vst1_s8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_s16(int16_t *a, int16x4_t b) {
+  // CHECK: test_vst1_s16
+  vst1_s16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_s32(int32_t *a, int32x2_t b) {
+  // CHECK: test_vst1_s32
+  vst1_s32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_s64(int64_t *a, int64x1_t b) {
+  // CHECK: test_vst1_s64
+  vst1_s64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_f16(float16_t *a, float16x4_t b) {
+  // CHECK: test_vst1_f16
+  vst1_f16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_f32(float32_t *a, float32x2_t b) {
+  // CHECK: test_vst1_f32
+  vst1_f32(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_f64(float64_t *a, float64x1_t b) {
+  // CHECK: test_vst1_f64
+  vst1_f64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_p8(poly8_t *a, poly8x8_t b) {
+  // CHECK: test_vst1_p8
+  vst1_p8(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst1_p16(poly16_t *a, poly16x4_t b) {
+  // CHECK: test_vst1_p16
+  vst1_p16(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_u8(uint8_t *a, uint8x16x2_t b) {
+  // CHECK: test_vst2q_u8
+  vst2q_u8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_u16(uint16_t *a, uint16x8x2_t b) {
+  // CHECK: test_vst2q_u16
+  vst2q_u16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_u32(uint32_t *a, uint32x4x2_t b) {
+  // CHECK: test_vst2q_u32
+  vst2q_u32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_u64(uint64_t *a, uint64x2x2_t b) {
+  // CHECK: test_vst2q_u64
+  vst2q_u64(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_s8(int8_t *a, int8x16x2_t b) {
+  // CHECK: test_vst2q_s8
+  vst2q_s8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_s16(int16_t *a, int16x8x2_t b) {
+  // CHECK: test_vst2q_s16
+  vst2q_s16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_s32(int32_t *a, int32x4x2_t b) {
+  // CHECK: test_vst2q_s32
+  vst2q_s32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_s64(int64_t *a, int64x2x2_t b) {
+  // CHECK: test_vst2q_s64
+  vst2q_s64(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_f16(float16_t *a, float16x8x2_t b) {
+  // CHECK: test_vst2q_f16
+  vst2q_f16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_f32(float32_t *a, float32x4x2_t b) {
+  // CHECK: test_vst2q_f32
+  vst2q_f32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_f64(float64_t *a, float64x2x2_t b) {
+  // CHECK: test_vst2q_f64
+  vst2q_f64(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_p8(poly8_t *a, poly8x16x2_t b) {
+  // CHECK: test_vst2q_p8
+  vst2q_p8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2q_p16(poly16_t *a, poly16x8x2_t b) {
+  // CHECK: test_vst2q_p16
+  vst2q_p16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_u8(uint8_t *a, uint8x8x2_t b) {
+  // CHECK: test_vst2_u8
+  vst2_u8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_u16(uint16_t *a, uint16x4x2_t b) {
+  // CHECK: test_vst2_u16
+  vst2_u16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_u32(uint32_t *a, uint32x2x2_t b) {
+  // CHECK: test_vst2_u32
+  vst2_u32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_u64(uint64_t *a, uint64x1x2_t b) {
+  // CHECK: test_vst2_u64
+  vst2_u64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_s8(int8_t *a, int8x8x2_t b) {
+  // CHECK: test_vst2_s8
+  vst2_s8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_s16(int16_t *a, int16x4x2_t b) {
+  // CHECK: test_vst2_s16
+  vst2_s16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_s32(int32_t *a, int32x2x2_t b) {
+  // CHECK: test_vst2_s32
+  vst2_s32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_s64(int64_t *a, int64x1x2_t b) {
+  // CHECK: test_vst2_s64
+  vst2_s64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_f16(float16_t *a, float16x4x2_t b) {
+  // CHECK: test_vst2_f16
+  vst2_f16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_f32(float32_t *a, float32x2x2_t b) {
+  // CHECK: test_vst2_f32
+  vst2_f32(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_f64(float64_t *a, float64x1x2_t b) {
+  // CHECK: test_vst2_f64
+  vst2_f64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_p8(poly8_t *a, poly8x8x2_t b) {
+  // CHECK: test_vst2_p8
+  vst2_p8(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst2_p16(poly16_t *a, poly16x4x2_t b) {
+  // CHECK: test_vst2_p16
+  vst2_p16(a, b);
+  // CHECK: st2 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_u8(uint8_t *a, uint8x16x3_t b) {
+  // CHECK: test_vst3q_u8
+  vst3q_u8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_u16(uint16_t *a, uint16x8x3_t b) {
+  // CHECK: test_vst3q_u16
+  vst3q_u16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_u32(uint32_t *a, uint32x4x3_t b) {
+  // CHECK: test_vst3q_u32
+  vst3q_u32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_u64(uint64_t *a, uint64x2x3_t b) {
+  // CHECK: test_vst3q_u64
+  vst3q_u64(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_s8(int8_t *a, int8x16x3_t b) {
+  // CHECK: test_vst3q_s8
+  vst3q_s8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_s16(int16_t *a, int16x8x3_t b) {
+  // CHECK: test_vst3q_s16
+  vst3q_s16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_s32(int32_t *a, int32x4x3_t b) {
+  // CHECK: test_vst3q_s32
+  vst3q_s32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_s64(int64_t *a, int64x2x3_t b) {
+  // CHECK: test_vst3q_s64
+  vst3q_s64(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_f16(float16_t *a, float16x8x3_t b) {
+  // CHECK: test_vst3q_f16
+  vst3q_f16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_f32(float32_t *a, float32x4x3_t b) {
+  // CHECK: test_vst3q_f32
+  vst3q_f32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_f64(float64_t *a, float64x2x3_t b) {
+  // CHECK: test_vst3q_f64
+  vst3q_f64(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_p8(poly8_t *a, poly8x16x3_t b) {
+  // CHECK: test_vst3q_p8
+  vst3q_p8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3q_p16(poly16_t *a, poly16x8x3_t b) {
+  // CHECK: test_vst3q_p16
+  vst3q_p16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_u8(uint8_t *a, uint8x8x3_t b) {
+  // CHECK: test_vst3_u8
+  vst3_u8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_u16(uint16_t *a, uint16x4x3_t b) {
+  // CHECK: test_vst3_u16
+  vst3_u16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_u32(uint32_t *a, uint32x2x3_t b) {
+  // CHECK: test_vst3_u32
+  vst3_u32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_u64(uint64_t *a, uint64x1x3_t b) {
+  // CHECK: test_vst3_u64
+  vst3_u64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_s8(int8_t *a, int8x8x3_t b) {
+  // CHECK: test_vst3_s8
+  vst3_s8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_s16(int16_t *a, int16x4x3_t b) {
+  // CHECK: test_vst3_s16
+  vst3_s16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_s32(int32_t *a, int32x2x3_t b) {
+  // CHECK: test_vst3_s32
+  vst3_s32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_s64(int64_t *a, int64x1x3_t b) {
+  // CHECK: test_vst3_s64
+  vst3_s64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_f16(float16_t *a, float16x4x3_t b) {
+  // CHECK: test_vst3_f16
+  vst3_f16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_f32(float32_t *a, float32x2x3_t b) {
+  // CHECK: test_vst3_f32
+  vst3_f32(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_f64(float64_t *a, float64x1x3_t b) {
+  // CHECK: test_vst3_f64
+  vst3_f64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_p8(poly8_t *a, poly8x8x3_t b) {
+  // CHECK: test_vst3_p8
+  vst3_p8(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst3_p16(poly16_t *a, poly16x4x3_t b) {
+  // CHECK: test_vst3_p16
+  vst3_p16(a, b);
+  // CHECK: st3 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_u8(uint8_t *a, uint8x16x4_t b) {
+  // CHECK: test_vst4q_u8
+  vst4q_u8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_u16(uint16_t *a, uint16x8x4_t b) {
+  // CHECK: test_vst4q_u16
+  vst4q_u16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_u32(uint32_t *a, uint32x4x4_t b) {
+  // CHECK: test_vst4q_u32
+  vst4q_u32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_u64(uint64_t *a, uint64x2x4_t b) {
+  // CHECK: test_vst4q_u64
+  vst4q_u64(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_s8(int8_t *a, int8x16x4_t b) {
+  // CHECK: test_vst4q_s8
+  vst4q_s8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_s16(int16_t *a, int16x8x4_t b) {
+  // CHECK: test_vst4q_s16
+  vst4q_s16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_s32(int32_t *a, int32x4x4_t b) {
+  // CHECK: test_vst4q_s32
+  vst4q_s32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_s64(int64_t *a, int64x2x4_t b) {
+  // CHECK: test_vst4q_s64
+  vst4q_s64(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_f16(float16_t *a, float16x8x4_t b) {
+  // CHECK: test_vst4q_f16
+  vst4q_f16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_f32(float32_t *a, float32x4x4_t b) {
+  // CHECK: test_vst4q_f32
+  vst4q_f32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s, v{{[0-9]+}}.4s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_f64(float64_t *a, float64x2x4_t b) {
+  // CHECK: test_vst4q_f64
+  vst4q_f64(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d, v{{[0-9]+}}.2d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_p8(poly8_t *a, poly8x16x4_t b) {
+  // CHECK: test_vst4q_p8
+  vst4q_p8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b, v{{[0-9]+}}.16b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4q_p16(poly16_t *a, poly16x8x4_t b) {
+  // CHECK: test_vst4q_p16
+  vst4q_p16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h, v{{[0-9]+}}.8h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_u8(uint8_t *a, uint8x8x4_t b) {
+  // CHECK: test_vst4_u8
+  vst4_u8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_u16(uint16_t *a, uint16x4x4_t b) {
+  // CHECK: test_vst4_u16
+  vst4_u16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_u32(uint32_t *a, uint32x2x4_t b) {
+  // CHECK: test_vst4_u32
+  vst4_u32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_u64(uint64_t *a, uint64x1x4_t b) {
+  // CHECK: test_vst4_u64
+  vst4_u64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_s8(int8_t *a, int8x8x4_t b) {
+  // CHECK: test_vst4_s8
+  vst4_s8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_s16(int16_t *a, int16x4x4_t b) {
+  // CHECK: test_vst4_s16
+  vst4_s16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_s32(int32_t *a, int32x2x4_t b) {
+  // CHECK: test_vst4_s32
+  vst4_s32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_s64(int64_t *a, int64x1x4_t b) {
+  // CHECK: test_vst4_s64
+  vst4_s64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_f16(float16_t *a, float16x4x4_t b) {
+  // CHECK: test_vst4_f16
+  vst4_f16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_f32(float32_t *a, float32x2x4_t b) {
+  // CHECK: test_vst4_f32
+  vst4_f32(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s, v{{[0-9]+}}.2s}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_f64(float64_t *a, float64x1x4_t b) {
+  // CHECK: test_vst4_f64
+  vst4_f64(a, b);
+  // CHECK: st1 {v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d, v{{[0-9]+}}.1d}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_p8(poly8_t *a, poly8x8x4_t b) {
+  // CHECK: test_vst4_p8
+  vst4_p8(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b, v{{[0-9]+}}.8b}, [{{x[0-9]+|sp}}]
+}
+
+void test_vst4_p16(poly16_t *a, poly16x4x4_t b) {
+  // CHECK: test_vst4_p16
+  vst4_p16(a, b);
+  // CHECK: st4 {v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h, v{{[0-9]+}}.4h}, [{{x[0-9]+|sp}}]
+}
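
For readers unfamiliar with the structured load/store intrinsics exercised by these tests: vldN de-interleaves N-element records from memory into N vector registers and vstN interleaves them back, which is why the CHECK lines expect ldN/stN register lists. The 64x1 variants (uint64x1x2_t and friends) are checked against ld1/st1 instead, since the AArch64 LDn/STn (n >= 2) instructions have no 1D arrangement. A minimal usage sketch (hypothetical code, not part of this patch; the function name and loop shape are illustrative only):

    #include <arm_neon.h>

    /* Double the right channel of interleaved 16-bit stereo samples.
       vld2_s16 should compile to the "ld2 {vN.4h, vM.4h}" form checked
       above, and vst2_s16 to the matching st2.  Only full 8-sample
       blocks are processed; any tail is left untouched. */
    void double_right_channel(int16_t *samples, int n) {
      for (int i = 0; i + 8 <= n; i += 8) {
        int16x4x2_t v = vld2_s16(samples + i); /* de-interleave L/R */
        v.val[1] = vshl_n_s16(v.val[1], 1);    /* R *= 2 */
        vst2_s16(samples + i, v);              /* re-interleave */
      }
    }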