diff --git a/llvm/include/llvm/IR/IntrinsicsARM.td b/llvm/include/llvm/IR/IntrinsicsARM.td
--- a/llvm/include/llvm/IR/IntrinsicsARM.td
+++ b/llvm/include/llvm/IR/IntrinsicsARM.td
@@ -783,4 +783,58 @@ [], [IntrReadMem, IntrWriteMem]>;
+def int_arm_mve_pred_i2v : Intrinsic<
+   [llvm_anyvector_ty], [llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_pred_v2i : Intrinsic<
+   [llvm_i32_ty], [llvm_anyvector_ty], [IntrNoMem]>;
+
+multiclass IntrinsicSignSuffix<list<LLVMType> rets, list<LLVMType> params = [],
+                               list<IntrinsicProperty> props = [],
+                               string name = "",
+                               list<SDNodeProperty> sdprops = []> {
+  def _s: Intrinsic<rets, params, props, name, sdprops>;
+  def _u: Intrinsic<rets, params, props, name, sdprops>;
+}
+
+def int_arm_mve_add_predicated: Intrinsic<[llvm_anyvector_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+   [IntrNoMem]>;
+def int_arm_mve_sub_predicated: Intrinsic<[llvm_anyvector_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_anyvector_ty, LLVMMatchType<0>],
+   [IntrNoMem]>;
+
+defm int_arm_mve_minv: IntrinsicSignSuffix<[llvm_i32_ty],
+   [llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+
+def int_arm_mve_vld2q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
+def int_arm_mve_vld4q: Intrinsic<[llvm_anyvector_ty, LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [llvm_anyptr_ty], [IntrReadMem]>;
+
+def int_arm_mve_vst2q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]>;
+def int_arm_mve_vst4q: Intrinsic<[], [llvm_anyptr_ty, llvm_anyvector_ty, LLVMMatchType<1>, LLVMMatchType<1>, LLVMMatchType<1>, llvm_i32_ty], [IntrWriteMem]
+>;
+
+def int_arm_mve_fltnarrow: Intrinsic<[llvm_v8f16_ty],
+   [llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_fltnarrow_predicated: Intrinsic<[llvm_v8f16_ty],
+   [llvm_v8f16_ty, llvm_v4f32_ty, llvm_i32_ty, llvm_v4i1_ty], [IntrNoMem]>;
+
+def int_arm_mve_vldr_gather_base_wb: Intrinsic<
+   [llvm_anyvector_ty, llvm_anyvector_ty],
+   [LLVMMatchType<1>, llvm_i32_ty], [IntrReadMem]>;
+def int_arm_mve_vldr_gather_base_wb_predicated: Intrinsic<
+   [llvm_anyvector_ty, llvm_anyvector_ty],
+   [LLVMMatchType<1>, llvm_i32_ty, llvm_anyvector_ty], [IntrReadMem]>;
+
+def int_arm_mve_urshrl: Intrinsic<
+   [llvm_i32_ty, llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
+   [IntrNoMem]>;
+
+def int_arm_mve_vadc: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, llvm_i32_ty], [IntrNoMem]>;
+def int_arm_mve_vadc_predicated: Intrinsic<
+   [llvm_anyvector_ty, llvm_i32_ty],
+   [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>,
+    llvm_i32_ty, llvm_anyvector_ty], [IntrNoMem]>;
+
 } // end TargetPrefix
diff --git a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
--- a/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
+++ b/llvm/lib/Target/ARM/ARMISelDAGToDAG.cpp
@@ -207,6 +207,36 @@ unsigned NumVecs, const uint16_t *DOpcodes, const uint16_t *QOpcodes);
+  /// Helper functions for setting up clusters of MVE predication operands.
+  template <typename SDValueVector>
+  void MVE_Predicated(SDValueVector &Ops, SDLoc Loc, SDValue PredicateMask);
+  template <typename SDValueVector>
+  void MVE_Predicated(SDValueVector &Ops, SDLoc Loc, SDValue PredicateMask,
+                      SDValue Inactive);
+
+  template <typename SDValueVector>
+  void MVE_Unpredicated(SDValueVector &Ops, SDLoc Loc);
+  template <typename SDValueVector>
+  void MVE_Unpredicated(SDValueVector &Ops, SDLoc Loc, EVT InactiveTy);
+
+  /// SelectMVE_VLD - Select MVE interleaving load intrinsics. NumVecs
+  /// should be 2 or 4.
The opcode array specifies the instructions + /// used for 8, 16 and 32-bit lane sizes respectively, and each + /// pointer points to a set of NumVecs sub-opcodes used for the + /// different stages (e.g. VLD20 versus VLD21) of each load family. + void SelectMVE_VLD(SDNode *N, unsigned NumVecs, + const uint16_t *const *Opcodes); + + /// SelectMVE_WB - Select MVE writeback load/store intrinsics. + void SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, bool Predicated); + + /// SelectMVE_LongShift - Select MVE 64-bit scalar shift intrinsics. + void SelectMVE_LongShift(SDNode *N, uint16_t Opcode, bool Immediate); + + /// SelectMVE_VADCSBC - Select MVE vector add/sub-with-carry intrinsics. + void SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry, + uint16_t OpcodeWithNoCarry, bool Add, bool Predicated); + /// SelectVLDDup - Select NEON load-duplicate intrinsics. NumVecs /// should be 1, 2, 3 or 4. The opcode array specifies the instructions used /// for loading D registers. @@ -2280,6 +2310,168 @@ CurDAG->RemoveDeadNode(N); } +template +void ARMDAGToDAGISel::MVE_Predicated(SDValueVector &Ops, SDLoc Loc, + SDValue PredicateMask) { + Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32)); + Ops.push_back(PredicateMask); +} + +template +void ARMDAGToDAGISel::MVE_Predicated(SDValueVector &Ops, SDLoc Loc, + SDValue PredicateMask, SDValue Inactive) { + Ops.push_back(CurDAG->getTargetConstant(ARMVCC::Then, Loc, MVT::i32)); + Ops.push_back(PredicateMask); + Ops.push_back(Inactive); +} + +template +void ARMDAGToDAGISel::MVE_Unpredicated(SDValueVector &Ops, SDLoc Loc) { + Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32)); + Ops.push_back(CurDAG->getRegister(0, MVT::i32)); +} + +template +void ARMDAGToDAGISel::MVE_Unpredicated(SDValueVector &Ops, SDLoc Loc, + EVT InactiveTy) { + Ops.push_back(CurDAG->getTargetConstant(ARMVCC::None, Loc, MVT::i32)); + Ops.push_back(CurDAG->getRegister(0, MVT::i32)); + Ops.push_back(SDValue( + CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, InactiveTy), 0)); +} + +void ARMDAGToDAGISel::SelectMVE_VLD(SDNode *N, unsigned NumVecs, + const uint16_t *const *Opcodes) { + EVT VT = N->getValueType(0); + SDLoc Loc(N); + + const uint16_t *OurOpcodes; + switch (VT.getVectorElementType().getSizeInBits()) { + case 8: + OurOpcodes = Opcodes[0]; + break; + case 16: + OurOpcodes = Opcodes[1]; + break; + case 32: + OurOpcodes = Opcodes[2]; + break; + default: + llvm_unreachable("bad vector element size in SelectMVE_VLD"); + } + + EVT DataTy = EVT::getVectorVT(*CurDAG->getContext(), MVT::i64, NumVecs * 2); + EVT ResultTys[] = {DataTy, MVT::Other}; + + auto Data = SDValue( + CurDAG->getMachineNode(TargetOpcode::IMPLICIT_DEF, Loc, DataTy), 0); + SDValue Chain = N->getOperand(0); + for (unsigned Stage = 0; Stage < NumVecs; ++Stage) { + SDValue Ops[] = {Data, N->getOperand(2), Chain}; + auto LoadInst = + CurDAG->getMachineNode(OurOpcodes[Stage], Loc, ResultTys, Ops); + Data = SDValue(LoadInst, 0); + Chain = SDValue(LoadInst, 1); + } + + for (unsigned i = 0; i < NumVecs; i++) + ReplaceUses(SDValue(N, i), + CurDAG->getTargetExtractSubreg(ARM::qsub_0 + i, Loc, VT, Data)); + ReplaceUses(SDValue(N, NumVecs), Chain); + CurDAG->RemoveDeadNode(N); +} + +void ARMDAGToDAGISel::SelectMVE_WB(SDNode *N, const uint16_t *Opcodes, + bool Predicated) { + SDLoc Loc(N); + SmallVector Ops; + + uint16_t Opcode; + switch (N->getValueType(1).getVectorElementType().getSizeInBits()) { + case 32: + Opcode = Opcodes[0]; + break; + case 64: + Opcode = Opcodes[1]; + break; + 
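+  // The callers of this function pass opcode pairs covering only 32-bit and
+  // 64-bit elements (the gather-base writeback loads), so any other element
+  // size is a selection error.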
default: + llvm_unreachable("bad vector element size in SelectMVE_WB"); + } + + Ops.push_back(N->getOperand(2)); // vector of base addresses + + int32_t ImmValue = cast(N->getOperand(3))->getZExtValue(); + Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset + + if (Predicated) + MVE_Predicated(Ops, Loc, N->getOperand(4)); + else + MVE_Unpredicated(Ops, Loc); + + Ops.push_back(N->getOperand(0)); // chain + + CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops)); +} + +void ARMDAGToDAGISel::SelectMVE_LongShift(SDNode *N, uint16_t Opcode, + bool Immediate) { + SDLoc Loc(N); + SmallVector Ops; + + // Two 32-bit halves of the value to be shifted + Ops.push_back(N->getOperand(1)); + Ops.push_back(N->getOperand(2)); + + // The shift count + if (Immediate) { + int32_t ImmValue = cast(N->getOperand(3))->getZExtValue(); + Ops.push_back(getI32Imm(ImmValue, Loc)); // immediate offset + } else { + Ops.push_back(N->getOperand(3)); + } + + // MVE scalar shifts are IT-predicable, so include the standard + // predicate arguments. + Ops.push_back(getAL(CurDAG, Loc)); + Ops.push_back(CurDAG->getRegister(0, MVT::i32)); + + CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops)); +} + +void ARMDAGToDAGISel::SelectMVE_VADCSBC(SDNode *N, uint16_t OpcodeWithCarry, + uint16_t OpcodeWithNoCarry, + bool Add, bool Predicated) { + SDLoc Loc(N); + SmallVector Ops; + uint16_t Opcode; + + unsigned FirstInputOp = Predicated ? 2 : 1; + + // Two input vectors and the input carry flag + Ops.push_back(N->getOperand(FirstInputOp)); + Ops.push_back(N->getOperand(FirstInputOp + 1)); + SDValue CarryIn = N->getOperand(FirstInputOp + 2); + ConstantSDNode *CarryInConstant = dyn_cast(CarryIn); + uint32_t CarryMask = 1 << 29; + uint32_t CarryExpected = Add ? 0 : CarryMask; + if (CarryInConstant && + (CarryInConstant->getZExtValue() & CarryMask) == CarryExpected) { + Opcode = OpcodeWithNoCarry; + } else { + Ops.push_back(CarryIn); + Opcode = OpcodeWithCarry; + } + + if (Predicated) + MVE_Predicated(Ops, Loc, + N->getOperand(FirstInputOp + 3), // predicate + N->getOperand(FirstInputOp - 1)); // inactive + else + MVE_Unpredicated(Ops, Loc, N->getValueType(0)); + + CurDAG->SelectNodeTo(N, Opcode, N->getVTList(), makeArrayRef(Ops)); +} + void ARMDAGToDAGISel::SelectVLDDup(SDNode *N, bool IsIntrinsic, bool isUpdating, unsigned NumVecs, const uint16_t *DOpcodes, @@ -4004,7 +4196,60 @@ SelectVLDSTLane(N, false, false, 4, DOpcodes, QOpcodes); return; } + + case Intrinsic::arm_mve_vld2q: { + static const uint16_t Opcodes8[] = {ARM::MVE_VLD20_8, ARM::MVE_VLD21_8}; + static const uint16_t Opcodes16[] = {ARM::MVE_VLD20_16, + ARM::MVE_VLD21_16}; + static const uint16_t Opcodes32[] = {ARM::MVE_VLD20_32, + ARM::MVE_VLD21_32}; + static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32}; + SelectMVE_VLD(N, 2, Opcodes); + return; + } + + case Intrinsic::arm_mve_vld4q: { + static const uint16_t Opcodes8[] = {ARM::MVE_VLD40_8, ARM::MVE_VLD41_8, + ARM::MVE_VLD42_8, ARM::MVE_VLD43_8}; + static const uint16_t Opcodes16[] = {ARM::MVE_VLD40_16, ARM::MVE_VLD41_16, + ARM::MVE_VLD42_16, + ARM::MVE_VLD43_16}; + static const uint16_t Opcodes32[] = {ARM::MVE_VLD40_32, ARM::MVE_VLD41_32, + ARM::MVE_VLD42_32, + ARM::MVE_VLD43_32}; + static const uint16_t *const Opcodes[] = {Opcodes8, Opcodes16, Opcodes32}; + SelectMVE_VLD(N, 4, Opcodes); + return; + } + + case Intrinsic::arm_mve_vldr_gather_base_wb: + case Intrinsic::arm_mve_vldr_gather_base_wb_predicated: + static const uint16_t Opcodes[] = {ARM::MVE_VLDRWU32_qi_pre, + 
ARM::MVE_VLDRDU64_qi_pre}; + SelectMVE_WB(N, Opcodes, + IntNo == Intrinsic::arm_mve_vldr_gather_base_wb_predicated); + return; + } + break; + } + + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntNo = cast(N->getOperand(0))->getZExtValue(); + switch (IntNo) { + default: + break; + + case Intrinsic::arm_mve_urshrl: + SelectMVE_LongShift(N, ARM::MVE_URSHRL, true); + return; + + case Intrinsic::arm_mve_vadc: + case Intrinsic::arm_mve_vadc_predicated: + SelectMVE_VADCSBC(N, ARM::MVE_VADC, ARM::MVE_VADCI, true, + IntNo == Intrinsic::arm_mve_vadc_predicated); + return; } + break; } diff --git a/llvm/lib/Target/ARM/ARMInstrMVE.td b/llvm/lib/Target/ARM/ARMInstrMVE.td --- a/llvm/lib/Target/ARM/ARMInstrMVE.td +++ b/llvm/lib/Target/ARM/ARMInstrMVE.td @@ -274,6 +274,28 @@ let MIOperandInfo = (ops MQPR:$base, i32imm:$imm); } +// This class is effectively just a sort of 'subroutine', working around the +// fact that Tablegen has no explicit syntax for function definition. If you +// have a vector type like v8i16, and you want the corresponding predicate type +// that should be used in IR intrinsics whose source vector is of that type, +// you can refer to mkpred.p to compute it automatically, e.g. in +// foreach or multiclass definitions. +class mkpred { + ValueType p = !cond(!eq(VT.Value, v16i8.Value): v16i1, + !eq(VT.Value, v8i16.Value): v8i1, + !eq(VT.Value, v8f16.Value): v8i1, + !eq(VT.Value, v4i32.Value): v4i1, + !eq(VT.Value, v4f32.Value): v4i1, + // For vectors of 2 values, use v4i1 instead of v2i1 for + // the moment: MVE codegen doesn't support doing all the + // auxiliary operations on v2i1 such as vector shuffles, + // and also, there's no MVE compare instruction that will + // generate v2i1 directly. We could rethink this later if + // we have a better idea. 
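+                        // (Usage is e.g. mkpred<v4i32>.p, which gives v4i1;
+                        // the two entries below make mkpred<v2i64>.p and
+                        // mkpred<v2f64>.p give v4i1 as well.)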
+ !eq(VT.Value, v2i64.Value): v4i1, + !eq(VT.Value, v2f64.Value): v4i1); +} + // --------- Start of base classes for the instructions themselves class MVE_MI; defm MVE_VMAXV : MVE_VMINMAXV_ty<"vmaxv", 0b0>; +let Predicates = [HasMVEInt] in { + foreach vtype = [v16i8, v8i16, v4i32] in { + def : Pat<(i32 (int_arm_mve_minv_s (i32 rGPR:$prev), (vtype MQPR:$vec))), + (i32 (MVE_VMINVs8 (i32 rGPR:$prev), (vtype MQPR:$vec)))>; + def : Pat<(i32 (int_arm_mve_minv_u (i32 rGPR:$prev), (vtype MQPR:$vec))), + (i32 (MVE_VMINVu8 (i32 rGPR:$prev), (vtype MQPR:$vec)))>; + } +} + multiclass MVE_VMINMAXAV_ty pattern=[]> { def s8 : MVE_VMINMAXV; def s16 : MVE_VMINMAXV; @@ -1434,7 +1465,7 @@ def MVE_VQRDMULHi32 : MVE_VQRDMULH<"s32", 0b10>; class MVE_VADDSUB size, bit subtract, - list pattern=[]> + ValueType VT_, list pattern=[]> : MVE_int { let Inst{28} = subtract; @@ -1443,37 +1474,51 @@ let Inst{12-8} = 0b01000; let Inst{4} = 0b0; let Inst{0} = 0b0; -} -class MVE_VADD size, list pattern=[]> - : MVE_VADDSUB<"vadd", suffix, size, 0b0, pattern>; -class MVE_VSUB size, list pattern=[]> - : MVE_VADDSUB<"vsub", suffix, size, 0b1, pattern>; + ValueType VT = VT_; +} -def MVE_VADDi8 : MVE_VADD<"i8", 0b00>; -def MVE_VADDi16 : MVE_VADD<"i16", 0b01>; -def MVE_VADDi32 : MVE_VADD<"i32", 0b10>; +class MVE_VADD size, ValueType VT> + : MVE_VADDSUB<"vadd", suffix, size, 0b0, VT>; +class MVE_VSUB size, ValueType VT> + : MVE_VADDSUB<"vsub", suffix, size, 0b1, VT>; -let Predicates = [HasMVEInt] in { - def : Pat<(v16i8 (add (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), - (v16i8 (MVE_VADDi8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; - def : Pat<(v8i16 (add (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), - (v8i16 (MVE_VADDi16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; - def : Pat<(v4i32 (add (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))), - (v4i32 (MVE_VADDi32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)))>; -} +def MVE_VADDi8 : MVE_VADD<"i8", 0b00, v16i8>; +def MVE_VADDi16 : MVE_VADD<"i16", 0b01, v8i16>; +def MVE_VADDi32 : MVE_VADD<"i32", 0b10, v4i32>; -def MVE_VSUBi8 : MVE_VSUB<"i8", 0b00>; -def MVE_VSUBi16 : MVE_VSUB<"i16", 0b01>; -def MVE_VSUBi32 : MVE_VSUB<"i32", 0b10>; +def MVE_VSUBi8 : MVE_VSUB<"i8", 0b00, v16i8>; +def MVE_VSUBi16 : MVE_VSUB<"i16", 0b01, v8i16>; +def MVE_VSUBi32 : MVE_VSUB<"i32", 0b10, v4i32>; let Predicates = [HasMVEInt] in { - def : Pat<(v16i8 (sub (v16i8 MQPR:$val1), (v16i8 MQPR:$val2))), - (v16i8 (MVE_VSUBi8 (v16i8 MQPR:$val1), (v16i8 MQPR:$val2)))>; - def : Pat<(v8i16 (sub (v8i16 MQPR:$val1), (v8i16 MQPR:$val2))), - (v8i16 (MVE_VSUBi16 (v8i16 MQPR:$val1), (v8i16 MQPR:$val2)))>; - def : Pat<(v4i32 (sub (v4i32 MQPR:$val1), (v4i32 MQPR:$val2))), - (v4i32 (MVE_VSUBi32 (v4i32 MQPR:$val1), (v4i32 MQPR:$val2)))>; + foreach instr = [MVE_VADDi8, MVE_VADDi16, MVE_VADDi32] in + foreach vtype = [instr.VT] in + foreach ptype = [mkpred.p] in { + def : Pat<(vtype (add (vtype MQPR:$Qm), (vtype MQPR:$Qn))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn)))>; + def : Pat<(vtype (int_arm_mve_add_predicated (vtype MQPR:$Qm), + (vtype MQPR:$Qn), + (ptype VCCR:$mask), + (vtype MQPR:$inactive))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn), + (i32 1), (ptype VCCR:$mask), + (vtype MQPR:$inactive)))>; + } + + foreach instr = [MVE_VSUBi8, MVE_VSUBi16, MVE_VSUBi32] in + foreach vtype = [instr.VT] in + foreach ptype = [mkpred.p] in { + def : Pat<(vtype (sub (vtype MQPR:$Qm), (vtype MQPR:$Qn))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn)))>; + def : Pat<(vtype (int_arm_mve_sub_predicated (vtype MQPR:$Qm), + (vtype MQPR:$Qn), + (ptype 
VCCR:$mask), + (vtype MQPR:$inactive))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn), + (i32 1), (ptype VCCR:$mask), + (vtype MQPR:$inactive)))>; + } } class MVE_VQADDSUB; class MVE_VADDSUBFMA_fp pattern=[]> : MVEFloatArithNeon; -def MVE_VFMAf16 : MVE_VADDSUBFMA_fp<"vfma", "f16", 0b1, 0b1, 0b0, 0b0, +def MVE_VFMAf16 : MVE_VADDSUBFMA_fp<"vfma", "f16", 0b1, 0b1, 0b0, 0b0, v8f16, (ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">; -def MVE_VFMSf32 : MVE_VADDSUBFMA_fp<"vfms", "f32", 0b0, 0b1, 0b0, 0b1, +def MVE_VFMSf32 : MVE_VADDSUBFMA_fp<"vfms", "f32", 0b0, 0b1, 0b0, 0b1, v4f32, (ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">; -def MVE_VFMSf16 : MVE_VADDSUBFMA_fp<"vfms", "f16", 0b1, 0b1, 0b0, 0b1, +def MVE_VFMSf16 : MVE_VADDSUBFMA_fp<"vfms", "f16", 0b1, 0b1, 0b0, 0b1, v8f16, (ins MQPR:$Qd_src), vpred_n, "$Qd = $Qd_src">; let Predicates = [HasMVEFloat, UseFusedMAC] in { @@ -2673,24 +2720,40 @@ } -def MVE_VADDf32 : MVE_VADDSUBFMA_fp<"vadd", "f32", 0b0, 0b0, 0b1, 0b0>; -def MVE_VADDf16 : MVE_VADDSUBFMA_fp<"vadd", "f16", 0b1, 0b0, 0b1, 0b0>; +def MVE_VADDf32 : MVE_VADDSUBFMA_fp<"vadd", "f32", 0b0, 0b0, 0b1, 0b0, v4f32>; +def MVE_VADDf16 : MVE_VADDSUBFMA_fp<"vadd", "f16", 0b1, 0b0, 0b1, 0b0, v8f16>; -let Predicates = [HasMVEFloat] in { - def : Pat<(v4f32 (fadd (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))), - (v4f32 (MVE_VADDf32 (v4f32 MQPR:$val1), (v4f32 MQPR:$val2)))>; - def : Pat<(v8f16 (fadd (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))), - (v8f16 (MVE_VADDf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>; -} - -def MVE_VSUBf32 : MVE_VADDSUBFMA_fp<"vsub", "f32", 0b0, 0b0, 0b1, 0b1>; -def MVE_VSUBf16 : MVE_VADDSUBFMA_fp<"vsub", "f16", 0b1, 0b0, 0b1, 0b1>; +def MVE_VSUBf32 : MVE_VADDSUBFMA_fp<"vsub", "f32", 0b0, 0b0, 0b1, 0b1, v4f32>; +def MVE_VSUBf16 : MVE_VADDSUBFMA_fp<"vsub", "f16", 0b1, 0b0, 0b1, 0b1, v8f16>; let Predicates = [HasMVEFloat] in { - def : Pat<(v4f32 (fsub (v4f32 MQPR:$val1), (v4f32 MQPR:$val2))), - (v4f32 (MVE_VSUBf32 (v4f32 MQPR:$val1), (v4f32 MQPR:$val2)))>; - def : Pat<(v8f16 (fsub (v8f16 MQPR:$val1), (v8f16 MQPR:$val2))), - (v8f16 (MVE_VSUBf16 (v8f16 MQPR:$val1), (v8f16 MQPR:$val2)))>; + foreach instr = [MVE_VADDf16, MVE_VADDf32] in + foreach vtype = [instr.VT] in + foreach ptype = [mkpred.p] in { + def : Pat<(vtype (fadd (vtype MQPR:$Qm), (vtype MQPR:$Qn))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn)))>; + def : Pat<(vtype (int_arm_mve_add_predicated (vtype MQPR:$Qm), + (vtype MQPR:$Qn), + (ptype VCCR:$mask), + (vtype MQPR:$inactive))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn), + (i32 1), (ptype VCCR:$mask), + (vtype MQPR:$inactive)))>; + } + + foreach instr = [MVE_VSUBf16, MVE_VSUBf32] in + foreach vtype = [instr.VT] in + foreach ptype = [mkpred.p] in { + def : Pat<(vtype (fsub (vtype MQPR:$Qm), (vtype MQPR:$Qn))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn)))>; + def : Pat<(vtype (int_arm_mve_sub_predicated (vtype MQPR:$Qm), + (vtype MQPR:$Qn), + (ptype VCCR:$mask), + (vtype MQPR:$inactive))), + (vtype (instr (vtype MQPR:$Qm), (vtype MQPR:$Qn), + (i32 1), (ptype VCCR:$mask), + (vtype MQPR:$inactive)))>; + } } class MVE_VCADD pattern=[]> @@ -3444,6 +3507,17 @@ defm MVE_VCVTf16f32 : MVE_VCVT_ff_halves<"f16.f32", 0b0>; defm MVE_VCVTf32f16 : MVE_VCVT_ff_halves<"f32.f16", 0b1>; +let Predicates = [HasMVEFloat] in { + def : Pat<(v8f16 (int_arm_mve_fltnarrow (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 0))), + (v8f16 (MVE_VCVTf16f32bh (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm)))>; + def : Pat<(v8f16 (int_arm_mve_fltnarrow (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 1))), + 
(v8f16 (MVE_VCVTf16f32th (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm)))>; + def : Pat<(v8f16 (int_arm_mve_fltnarrow_predicated (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 0), (v4i1 VCCR:$mask))), + (v8f16 (MVE_VCVTf16f32bh (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 1), (v4i1 VCCR:$mask)))>; + def : Pat<(v8f16 (int_arm_mve_fltnarrow_predicated (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 1), (v4i1 VCCR:$mask))), + (v8f16 (MVE_VCVTf16f32th (v8f16 MQPR:$Qd_src), (v4f32 MQPR:$Qm), (i32 1), (v4i1 VCCR:$mask)))>; +} + class MVE_VxCADD size, bit halve, list pattern=[]> : MVE_qDest_qSrc; } +multiclass MVE_vst24_patterns { + foreach stage = [0,1] in + def : Pat<(int_arm_mve_vst2q i32:$addr, + (VT MQPR:$v0), (VT MQPR:$v1), (i32 stage)), + (!cast("MVE_VST2"#stage#"_"#lanesize) + (REG_SEQUENCE QQPR, VT:$v0, qsub_0, VT:$v1, qsub_1), + t2_addr_offset_none:$addr)>; + + foreach stage = [0,1,2,3] in + def : Pat<(int_arm_mve_vst4q i32:$addr, + (VT MQPR:$v0), (VT MQPR:$v1), + (VT MQPR:$v2), (VT MQPR:$v3), (i32 stage)), + (!cast("MVE_VST4"#stage#"_"#lanesize) + (REG_SEQUENCE QQQQPR, VT:$v0, qsub_0, VT:$v1, qsub_1, + VT:$v2, qsub_2, VT:$v3, qsub_3), + t2_addr_offset_none:$addr)>; +} +defm : MVE_vst24_patterns<8, v16i8>; +defm : MVE_vst24_patterns<16, v8i16>; +defm : MVE_vst24_patterns<32, v4i32>; + // end of MVE interleaving load/store // start of MVE predicable load/store @@ -5146,3 +5241,10 @@ def : Pat<(v16i8 (bitconvert (v8f16 MQPR:$src))), (v16i8 (MVE_VREV16_8 MQPR:$src))>; def : Pat<(v16i8 (bitconvert (v8i16 MQPR:$src))), (v16i8 (MVE_VREV16_8 MQPR:$src))>; } + +foreach vi1 = [ v16i1, v8i1, v4i1 ] in { + def : Pat<(vi1 (int_arm_mve_pred_i2v (i32 GPR:$pred))), + (vi1 (COPY_TO_REGCLASS GPR:$pred, VCCR))>; + def : Pat<(i32 (int_arm_mve_pred_v2i (vi1 VCCR:$pred))), + (i32 (COPY_TO_REGCLASS VCCR:$pred, GPR))>; +} diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/scalar-shifts.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/scalar-shifts.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/scalar-shifts.ll @@ -0,0 +1,23 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc i64 @test_urshrl(i64 %value) { +; CHECK-LABEL: test_urshrl: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: urshrl r0, r1, #6 +; CHECK-NEXT: bx lr +entry: + %0 = lshr i64 %value, 32 + %1 = trunc i64 %0 to i32 + %2 = trunc i64 %value to i32 + %3 = tail call { i32, i32 } @llvm.arm.mve.urshrl(i32 %2, i32 %1, i32 6) + %4 = extractvalue { i32, i32 } %3, 1 + %5 = zext i32 %4 to i64 + %6 = shl nuw i64 %5, 32 + %7 = extractvalue { i32, i32 } %3, 0 + %8 = zext i32 %7 to i64 + %9 = or i64 %6, %8 + ret i64 %9 +} + +declare { i32, i32 } @llvm.arm.mve.urshrl(i32, i32, i32) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vadc.ll @@ -0,0 +1,98 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc <4 x i32> @test_vadciq_s32(<4 x i32> %a, <4 x i32> %b, i32* %carry_out) { +; CHECK-LABEL: test_vadciq_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vadci.i32 q0, q0, q1 +; CHECK-NEXT: vmrs r1, fpscr_nzcvqc +; CHECK-NEXT: ubfx r1, r1, #29, #1 +; CHECK-NEXT: str r1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = tail call { <4 x i32>, i32 } 
@llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 0) + %1 = extractvalue { <4 x i32>, i32 } %0, 1 + %2 = lshr i32 %1, 29 + %3 = and i32 %2, 1 + store i32 %3, i32* %carry_out, align 4 + %4 = extractvalue { <4 x i32>, i32 } %0, 0 + ret <4 x i32> %4 +} + +declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32>, <4 x i32>, i32) #1 + +define arm_aapcs_vfpcc <4 x i32> @test_vadcq_u32(<4 x i32> %a, <4 x i32> %b, i32* %carry) { +; CHECK-LABEL: test_vadcq_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: ldr r1, [r0] +; CHECK-NEXT: lsls r1, r1, #29 +; CHECK-NEXT: vmsr fpscr_nzcvqc, r1 +; CHECK-NEXT: vadc.i32 q0, q0, q1 +; CHECK-NEXT: vmrs r1, fpscr_nzcvqc +; CHECK-NEXT: ubfx r1, r1, #29, #1 +; CHECK-NEXT: str r1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load i32, i32* %carry, align 4 + %1 = shl i32 %0, 29 + %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.v4i32(<4 x i32> %a, <4 x i32> %b, i32 %1) + %3 = extractvalue { <4 x i32>, i32 } %2, 1 + %4 = lshr i32 %3, 29 + %5 = and i32 %4, 1 + store i32 %5, i32* %carry, align 4 + %6 = extractvalue { <4 x i32>, i32 } %2, 0 + ret <4 x i32> %6 +} + +define arm_aapcs_vfpcc <4 x i32> @test_vadciq_m_u32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry_out, i16 zeroext %p) { +; CHECK-LABEL: test_vadciq_m_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r1 +; CHECK-NEXT: vpst +; CHECK-NEXT: vadcit.i32 q0, q1, q2 +; CHECK-NEXT: vmrs r1, fpscr_nzcvqc +; CHECK-NEXT: ubfx r1, r1, #29, #1 +; CHECK-NEXT: str r1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 0, <4 x i1> %1) + %3 = extractvalue { <4 x i32>, i32 } %2, 1 + %4 = lshr i32 %3, 29 + %5 = and i32 %4, 1 + store i32 %5, i32* %carry_out, align 4 + %6 = extractvalue { <4 x i32>, i32 } %2, 0 + ret <4 x i32> %6 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) #1 + +declare { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32>, <4 x i32>, <4 x i32>, i32, <4 x i1>) #1 + +define arm_aapcs_vfpcc <4 x i32> @test_vadcq_m_s32(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32* %carry, i16 zeroext %p) { +; CHECK-LABEL: test_vadcq_m_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: ldr r2, [r0] +; CHECK-NEXT: vmsr p0, r1 +; CHECK-NEXT: lsls r1, r2, #29 +; CHECK-NEXT: vmsr fpscr_nzcvqc, r1 +; CHECK-NEXT: vpst +; CHECK-NEXT: vadct.i32 q0, q1, q2 +; CHECK-NEXT: vmrs r1, fpscr_nzcvqc +; CHECK-NEXT: ubfx r1, r1, #29, #1 +; CHECK-NEXT: str r1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load i32, i32* %carry, align 4 + %1 = shl i32 %0, 29 + %2 = zext i16 %p to i32 + %3 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %2) + %4 = tail call { <4 x i32>, i32 } @llvm.arm.mve.vadc.predicated.v4i32.v4i1(<4 x i32> %inactive, <4 x i32> %a, <4 x i32> %b, i32 %1, <4 x i1> %3) + %5 = extractvalue { <4 x i32>, i32 } %4, 1 + %6 = lshr i32 %5, 29 + %7 = and i32 %6, 1 + store i32 %7, i32* %carry, align 4 + %8 = extractvalue { <4 x i32>, i32 } %4, 0 + ret <4 x i32> %8 +} diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vaddq.ll @@ -0,0 +1,58 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc <4 x i32> 
@test_vaddq_u32(<4 x i32> %a, <4 x i32> %b) { +; CHECK-LABEL: test_vaddq_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vadd.i32 q0, q1, q0 +; CHECK-NEXT: bx lr +entry: + %0 = add <4 x i32> %b, %a + ret <4 x i32> %0 +} + +define arm_aapcs_vfpcc <8 x half> @test_vsubq_f16(<8 x half> %a, <8 x half> %b) { +; CHECK-LABEL: test_vsubq_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vsub.f16 q0, q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = fsub <8 x half> %a, %b + ret <8 x half> %0 +} + +define arm_aapcs_vfpcc <16 x i8> @test_vaddq_m_s8(<16 x i8> %inactive, <16 x i8> %a, <16 x i8> %b, i16 zeroext %p) { +; CHECK-LABEL: test_vaddq_m_s8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vaddt.i8 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32 %0) + %2 = tail call <16 x i8> @llvm.arm.mve.add.predicated.v16i8.v16i1(<16 x i8> %a, <16 x i8> %b, <16 x i1> %1, <16 x i8> %inactive) + ret <16 x i8> %2 +} + +declare <16 x i1> @llvm.arm.mve.pred.i2v.v16i1(i32) + +declare <16 x i8> @llvm.arm.mve.add.predicated.v16i8.v16i1(<16 x i8>, <16 x i8>, <16 x i1>, <16 x i8>) + +define arm_aapcs_vfpcc <4 x float> @test_vsubq_m_f32(<4 x float> %inactive, <4 x float> %a, <4 x float> %b, i16 zeroext %p) { +; CHECK-LABEL: test_vsubq_m_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vsubt.f32 q0, q1, q2 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float> %a, <4 x float> %b, <4 x i1> %1, <4 x float> %inactive) + ret <4 x float> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) + +declare <4 x float> @llvm.arm.mve.sub.predicated.v4f32.v4i1(<4 x float>, <4 x float>, <4 x i1>, <4 x float>) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vcvt.ll @@ -0,0 +1,32 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc <8 x half> @test_vcvttq_f16_f32(<8 x half> %a, <4 x float> %b) { +; CHECK-LABEL: test_vcvttq_f16_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vcvtt.f16.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = tail call <8 x half> @llvm.arm.mve.fltnarrow(<8 x half> %a, <4 x float> %b, i32 1) + ret <8 x half> %0 +} + +declare <8 x half> @llvm.arm.mve.fltnarrow(<8 x half>, <4 x float>, i32) + +define arm_aapcs_vfpcc <8 x half> @test_vcvttq_m_f16_f32(<8 x half> %a, <4 x float> %b, i16 zeroext %p) { +; CHECK-LABEL: test_vcvttq_m_f16_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r0 +; CHECK-NEXT: vpst +; CHECK-NEXT: vcvttt.f16.f32 q0, q1 +; CHECK-NEXT: bx lr +entry: + %0 = zext i16 %p to i32 + %1 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %0) + %2 = tail call <8 x half> @llvm.arm.mve.fltnarrow.predicated(<8 x half> %a, <4 x float> %b, i32 1, <4 x i1> %1) + ret <8 x half> %2 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) + +declare <8 x half> @llvm.arm.mve.fltnarrow.predicated(<8 x half>, <4 x float>, i32, <4 x i1>) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vld24.ll @@ -0,0 
+1,91 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +%struct.float16x8x2_t = type { [2 x <8 x half>] } +%struct.uint8x16x4_t = type { [4 x <16 x i8>] } +%struct.uint32x4x2_t = type { [2 x <4 x i32>] } +%struct.int8x16x4_t = type { [4 x <16 x i8>] } + +define arm_aapcs_vfpcc %struct.float16x8x2_t @test_vld2q_f16(half* %addr) { +; CHECK-LABEL: test_vld2q_f16: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vld20.16 {q0, q1}, [r0] +; CHECK-NEXT: vld21.16 {q0, q1}, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = tail call { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half* %addr) + %1 = extractvalue { <8 x half>, <8 x half> } %0, 0 + %2 = insertvalue %struct.float16x8x2_t undef, <8 x half> %1, 0, 0 + %3 = extractvalue { <8 x half>, <8 x half> } %0, 1 + %4 = insertvalue %struct.float16x8x2_t %2, <8 x half> %3, 0, 1 + ret %struct.float16x8x2_t %4 +} + +declare { <8 x half>, <8 x half> } @llvm.arm.mve.vld2q.v8f16.p0f16(half*) + +define arm_aapcs_vfpcc %struct.uint8x16x4_t @test_vld4q_u8(i8* %addr) { +; CHECK-LABEL: test_vld4q_u8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vld40.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vld41.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vld42.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vld43.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = tail call { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0i8(i8* %addr) + %1 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 0 + %2 = insertvalue %struct.uint8x16x4_t undef, <16 x i8> %1, 0, 0 + %3 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 1 + %4 = insertvalue %struct.uint8x16x4_t %2, <16 x i8> %3, 0, 1 + %5 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 2 + %6 = insertvalue %struct.uint8x16x4_t %4, <16 x i8> %5, 0, 2 + %7 = extractvalue { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } %0, 3 + %8 = insertvalue %struct.uint8x16x4_t %6, <16 x i8> %7, 0, 3 + ret %struct.uint8x16x4_t %8 +} + +declare { <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8> } @llvm.arm.mve.vld4q.v16i8.p0i8(i8*) + +define arm_aapcs_vfpcc void @test_vst2q_u32(i32* %addr, %struct.uint32x4x2_t %value.coerce) { +; CHECK-LABEL: test_vst2q_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: @ kill: def $q1 killed $q1 killed $q0_q1 def $q0_q1 +; CHECK-NEXT: @ kill: def $q0 killed $q0 killed $q0_q1 def $q0_q1 +; CHECK-NEXT: vst20.32 {q0, q1}, [r0] +; CHECK-NEXT: vst21.32 {q0, q1}, [r0] +; CHECK-NEXT: bx lr +entry: + %value.coerce.fca.0.0.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 0 + %value.coerce.fca.0.1.extract = extractvalue %struct.uint32x4x2_t %value.coerce, 0, 1 + tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 0) + tail call void @llvm.arm.mve.vst2q.p0i32.v4i32(i32* %addr, <4 x i32> %value.coerce.fca.0.0.extract, <4 x i32> %value.coerce.fca.0.1.extract, i32 1) + ret void +} + +declare void @llvm.arm.mve.vst2q.p0i32.v4i32(i32*, <4 x i32>, <4 x i32>, i32) + +define arm_aapcs_vfpcc void @test_vst4q_s8(i8* %addr, %struct.int8x16x4_t %value.coerce) { +; CHECK-LABEL: test_vst4q_s8: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: @ kill: def $q3 killed $q3 killed $q0_q1_q2_q3 def $q0_q1_q2_q3 +; CHECK-NEXT: @ kill: def $q2 killed $q2 killed $q0_q1_q2_q3 def $q0_q1_q2_q3 +; CHECK-NEXT: @ kill: def $q1 killed $q1 killed $q0_q1_q2_q3 def $q0_q1_q2_q3 +; CHECK-NEXT: @ 
kill: def $q0 killed $q0 killed $q0_q1_q2_q3 def $q0_q1_q2_q3 +; CHECK-NEXT: vst40.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vst41.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vst42.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: vst43.8 {q0, q1, q2, q3}, [r0] +; CHECK-NEXT: bx lr +entry: + %value.coerce.fca.0.0.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 0 + %value.coerce.fca.0.1.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 1 + %value.coerce.fca.0.2.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 2 + %value.coerce.fca.0.3.extract = extractvalue %struct.int8x16x4_t %value.coerce, 0, 3 + tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 0) + tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 1) + tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 2) + tail call void @llvm.arm.mve.vst4q.p0i8.v16i8(i8* %addr, <16 x i8> %value.coerce.fca.0.0.extract, <16 x i8> %value.coerce.fca.0.1.extract, <16 x i8> %value.coerce.fca.0.2.extract, <16 x i8> %value.coerce.fca.0.3.extract, i32 3) + ret void +} + +declare void @llvm.arm.mve.vst4q.p0i8.v16i8(i8*, <16 x i8>, <16 x i8>, <16 x i8>, <16 x i8>, i32) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vldr.ll @@ -0,0 +1,62 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc <4 x i32> @test_vldrwq_gather_base_wb_s32(<4 x i32>* %addr) { +; CHECK-LABEL: test_vldrwq_gather_base_wb_s32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: vldrw.u32 q1, [q0, #80]! +; CHECK-NEXT: vstrw.32 q1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %addr, align 8 + %1 = tail call { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32> %0, i32 80) + %2 = extractvalue { <4 x i32>, <4 x i32> } %1, 1 + store <4 x i32> %2, <4 x i32>* %addr, align 8 + %3 = extractvalue { <4 x i32>, <4 x i32> } %1, 0 + ret <4 x i32> %3 +} + +declare { <4 x i32>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4i32.v4i32(<4 x i32>, i32) + +define arm_aapcs_vfpcc <4 x float> @test_vldrwq_gather_base_wb_f32(<4 x i32>* %addr) { +; CHECK-LABEL: test_vldrwq_gather_base_wb_f32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: vldrw.u32 q1, [q0, #64]! 
+; CHECK-NEXT: vstrw.32 q1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <4 x i32>, <4 x i32>* %addr, align 8 + %1 = tail call { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32> %0, i32 64) + %2 = extractvalue { <4 x float>, <4 x i32> } %1, 1 + store <4 x i32> %2, <4 x i32>* %addr, align 8 + %3 = extractvalue { <4 x float>, <4 x i32> } %1, 0 + ret <4 x float> %3 +} + +declare { <4 x float>, <4 x i32> } @llvm.arm.mve.vldr.gather.base.wb.v4f32.v4i32(<4 x i32>, i32) + +define arm_aapcs_vfpcc <2 x i64> @test_vldrdq_gather_base_wb_z_u64(<2 x i64>* %addr, i16 zeroext %p) { +; CHECK-LABEL: test_vldrdq_gather_base_wb_z_u64: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vmsr p0, r1 +; CHECK-NEXT: vldrw.u32 q0, [r0] +; CHECK-NEXT: vpst +; CHECK-NEXT: vldrdt.u64 q1, [q0, #656]! +; CHECK-NEXT: vstrw.32 q1, [r0] +; CHECK-NEXT: bx lr +entry: + %0 = load <2 x i64>, <2 x i64>* %addr, align 8 + %1 = zext i16 %p to i32 + %2 = tail call <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32 %1) + %3 = tail call { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64> %0, i32 656, <4 x i1> %2) + %4 = extractvalue { <2 x i64>, <2 x i64> } %3, 1 + store <2 x i64> %4, <2 x i64>* %addr, align 8 + %5 = extractvalue { <2 x i64>, <2 x i64> } %3, 0 + ret <2 x i64> %5 +} + +declare <4 x i1> @llvm.arm.mve.pred.i2v.v4i1(i32) + +declare { <2 x i64>, <2 x i64> } @llvm.arm.mve.vldr.gather.base.wb.predicated.v2i64.v2i64.v4i1(<2 x i64>, i32, <4 x i1>) diff --git a/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminvq.ll b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminvq.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/Thumb2/mve-intrinsics/vminvq.ll @@ -0,0 +1,14 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=thumbv8.1m.main -mattr=+mve.fp -o - %s | FileCheck %s + +define arm_aapcs_vfpcc i32 @test_vminvq_u32(i32 %a, <4 x i32> %b) { +; CHECK-LABEL: test_vminvq_u32: +; CHECK: @ %bb.0: @ %entry +; CHECK-NEXT: vminv.u8 r0, q0 +; CHECK-NEXT: bx lr +entry: + %0 = tail call i32 @llvm.arm.mve.minv.u.v4i32(i32 %a, <4 x i32> %b) + ret i32 %0 +} + +declare i32 @llvm.arm.mve.minv.u.v4i32(i32, <4 x i32>)
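The vminvq test above exercises only the unsigned half of the pair produced by the IntrinsicSignSuffix multiclass. As a minimal illustrative sketch (not part of the patch's test suite; the function name and the 16-bit lane width are chosen arbitrarily here), the signed variant int_arm_mve_minv_s would be called the same way from IR, assuming the analogous llvm.arm.mve.minv.s.* overload naming that matches the unsigned test:

define arm_aapcs_vfpcc i32 @test_vminvq_s16(i32 %a, <8 x i16> %b) {
entry:
  %0 = tail call i32 @llvm.arm.mve.minv.s.v8i16(i32 %a, <8 x i16> %b)
  ret i32 %0
}

declare i32 @llvm.arm.mve.minv.s.v8i16(i32, <8 x i16>)

Like the unsigned case, this currently selects through the placeholder pattern in ARMInstrMVE.td that maps every element width of int_arm_mve_minv_s to MVE_VMINVs8.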