Index: llvm/include/llvm/IR/IntrinsicsAArch64.td =================================================================== --- llvm/include/llvm/IR/IntrinsicsAArch64.td +++ llvm/include/llvm/IR/IntrinsicsAArch64.td @@ -790,6 +790,12 @@ llvm_i32_ty], [IntrNoMem]>; + class AdvSIMD_Pred1VectorArg_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + LLVMMatchType<0>], + [IntrNoMem]>; + class AdvSIMD_Pred2VectorArg_Intrinsic : Intrinsic<[llvm_anyvector_ty], [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, @@ -826,13 +832,20 @@ llvm_anyvector_ty], [IntrNoMem]>; - class AdvSIMD_SVE_Reduce_Intrinsic + class AdvSIMD_SVE_FP_Reduce_Intrinsic : Intrinsic<[llvm_anyfloat_ty], [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, llvm_anyvector_ty], [IntrNoMem]>; class AdvSIMD_SVE_ReduceWithInit_Intrinsic + : Intrinsic<[LLVMVectorElementType<0>], + [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, + LLVMVectorElementType<0>, + llvm_anyvector_ty], + [IntrNoMem]>; + + class AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic : Intrinsic<[llvm_anyfloat_ty], [LLVMScalarOrSameVectorWidth<1, llvm_i1_ty>, LLVMMatchType<0>, @@ -959,6 +972,12 @@ llvm_i32_ty], [IntrNoMem]>; + class AdvSIMD_SVE_TBL_Intrinsic + : Intrinsic<[llvm_anyvector_ty], + [LLVMMatchType<0>, + LLVMVectorOfBitcastsToInt<0>], + [IntrNoMem]>; + class SVE2_3VectorArg_Long_Intrinsic : Intrinsic<[llvm_anyvector_ty], [LLVMMatchType<0>, @@ -1001,7 +1020,7 @@ let TargetPrefix = "aarch64" in { // All intrinsics start with "llvm.aarch64.". -class AdvSIMD_SVE_Int_Reduce_Intrinsic +class AdvSIMD_SVE_Reduce_Intrinsic : Intrinsic<[LLVMVectorElementType<0>], [LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyvector_ty], @@ -1076,14 +1095,14 @@ def int_aarch64_sve_saddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic; def int_aarch64_sve_uaddv : AdvSIMD_SVE_SADDV_Reduce_Intrinsic; -def int_aarch64_sve_smaxv : AdvSIMD_SVE_Int_Reduce_Intrinsic; -def int_aarch64_sve_umaxv : AdvSIMD_SVE_Int_Reduce_Intrinsic; -def int_aarch64_sve_sminv : AdvSIMD_SVE_Int_Reduce_Intrinsic; -def int_aarch64_sve_uminv : AdvSIMD_SVE_Int_Reduce_Intrinsic; +def int_aarch64_sve_smaxv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_umaxv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_sminv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_uminv : AdvSIMD_SVE_Reduce_Intrinsic; -def int_aarch64_sve_orv : AdvSIMD_SVE_Int_Reduce_Intrinsic; -def int_aarch64_sve_eorv : AdvSIMD_SVE_Int_Reduce_Intrinsic; -def int_aarch64_sve_andv : AdvSIMD_SVE_Int_Reduce_Intrinsic; +def int_aarch64_sve_orv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_eorv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_andv : AdvSIMD_SVE_Reduce_Intrinsic; def int_aarch64_sve_abs : AdvSIMD_Merged1VectorArg_Intrinsic; def int_aarch64_sve_neg : AdvSIMD_Merged1VectorArg_Intrinsic; @@ -1159,11 +1178,27 @@ // Permutations and selection // +def int_aarch64_sve_clasta : AdvSIMD_Pred2VectorArg_Intrinsic; +def int_aarch64_sve_clasta_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic; +def int_aarch64_sve_clastb : AdvSIMD_Pred2VectorArg_Intrinsic; +def int_aarch64_sve_clastb_n : AdvSIMD_SVE_ReduceWithInit_Intrinsic; +def int_aarch64_sve_compact : AdvSIMD_Pred1VectorArg_Intrinsic; +def int_aarch64_sve_ext : AdvSIMD_2VectorArgIndexed_Intrinsic; +def int_aarch64_sve_lasta : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_lastb : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_rev : AdvSIMD_1VectorArg_Intrinsic; +def int_aarch64_sve_splice : AdvSIMD_Pred2VectorArg_Intrinsic; def int_aarch64_sve_sunpkhi : 
AdvSIMD_SVE_Unpack_Intrinsic; def int_aarch64_sve_sunpklo : AdvSIMD_SVE_Unpack_Intrinsic; - +def int_aarch64_sve_tbl : AdvSIMD_SVE_TBL_Intrinsic; +def int_aarch64_sve_trn1 : AdvSIMD_2VectorArg_Intrinsic; +def int_aarch64_sve_trn2 : AdvSIMD_2VectorArg_Intrinsic; def int_aarch64_sve_uunpkhi : AdvSIMD_SVE_Unpack_Intrinsic; def int_aarch64_sve_uunpklo : AdvSIMD_SVE_Unpack_Intrinsic; +def int_aarch64_sve_uzp1 : AdvSIMD_2VectorArg_Intrinsic; +def int_aarch64_sve_uzp2 : AdvSIMD_2VectorArg_Intrinsic; +def int_aarch64_sve_zip1 : AdvSIMD_2VectorArg_Intrinsic; +def int_aarch64_sve_zip2 : AdvSIMD_2VectorArg_Intrinsic; // // Logical operations @@ -1270,12 +1305,12 @@ // Floating-point reductions // -def int_aarch64_sve_fadda : AdvSIMD_SVE_ReduceWithInit_Intrinsic; -def int_aarch64_sve_faddv : AdvSIMD_SVE_Reduce_Intrinsic; -def int_aarch64_sve_fmaxv : AdvSIMD_SVE_Reduce_Intrinsic; -def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_Reduce_Intrinsic; -def int_aarch64_sve_fminv : AdvSIMD_SVE_Reduce_Intrinsic; -def int_aarch64_sve_fminnmv : AdvSIMD_SVE_Reduce_Intrinsic; +def int_aarch64_sve_fadda : AdvSIMD_SVE_FP_ReduceWithInit_Intrinsic; +def int_aarch64_sve_faddv : AdvSIMD_SVE_FP_Reduce_Intrinsic; +def int_aarch64_sve_fmaxv : AdvSIMD_SVE_FP_Reduce_Intrinsic; +def int_aarch64_sve_fmaxnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic; +def int_aarch64_sve_fminv : AdvSIMD_SVE_FP_Reduce_Intrinsic; +def int_aarch64_sve_fminnmv : AdvSIMD_SVE_FP_Reduce_Intrinsic; // // Floating-point conversions Index: llvm/lib/Target/AArch64/AArch64ISelLowering.h =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.h +++ llvm/lib/Target/AArch64/AArch64ISelLowering.h @@ -204,6 +204,13 @@ UUNPKHI, UUNPKLO, + CLASTA_N, + CLASTB_N, + LASTA, + LASTB, + REV, + TBL, + INSR, // Unsigned gather loads. 
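Note (editorial, not part of the patch): a minimal IR-level sketch of how one of the new intrinsics lowered in AArch64ISelLowering.cpp below is expected to be used and selected. The function name is illustrative; the intrinsic signature and expected instruction match the lastb_i32 test added at the end of this patch.

; Extract the last active i32 element of a scalable vector as a scalar.
; Expected codegen: lastb w0, p0, z0.s
define i32 @example_lastb(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a) {
  %res = call i32 @llvm.aarch64.sve.lastb.nxv4i32(<vscale x 4 x i1> %pg, <vscale x 4 x i32> %a)
  ret i32 %res
}

declare i32 @llvm.aarch64.sve.lastb.nxv4i32(<vscale x 4 x i1>, <vscale x 4 x i32>)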
Index: llvm/lib/Target/AArch64/AArch64ISelLowering.cpp =================================================================== --- llvm/lib/Target/AArch64/AArch64ISelLowering.cpp +++ llvm/lib/Target/AArch64/AArch64ISelLowering.cpp @@ -1288,6 +1288,12 @@ case AArch64ISD::ORV_PRED: return "AArch64ISD::ORV_PRED"; case AArch64ISD::EORV_PRED: return "AArch64ISD::EORV_PRED"; case AArch64ISD::ANDV_PRED: return "AArch64ISD::ANDV_PRED"; + case AArch64ISD::CLASTA_N: return "AArch64ISD::CLASTA_N"; + case AArch64ISD::CLASTB_N: return "AArch64ISD::CLASTB_N"; + case AArch64ISD::LASTA: return "AArch64ISD::LASTA"; + case AArch64ISD::LASTB: return "AArch64ISD::LASTB"; + case AArch64ISD::REV: return "AArch64ISD::REV"; + case AArch64ISD::TBL: return "AArch64ISD::TBL"; case AArch64ISD::NOT: return "AArch64ISD::NOT"; case AArch64ISD::BIT: return "AArch64ISD::BIT"; case AArch64ISD::CBZ: return "AArch64ISD::CBZ"; @@ -2907,6 +2913,42 @@ case Intrinsic::aarch64_sve_uunpklo: return DAG.getNode(AArch64ISD::UUNPKLO, dl, Op.getValueType(), Op.getOperand(1)); + case Intrinsic::aarch64_sve_clasta_n: + return DAG.getNode(AArch64ISD::CLASTA_N, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); + case Intrinsic::aarch64_sve_clastb_n: + return DAG.getNode(AArch64ISD::CLASTB_N, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2), Op.getOperand(3)); + case Intrinsic::aarch64_sve_lasta: + return DAG.getNode(AArch64ISD::LASTA, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_lastb: + return DAG.getNode(AArch64ISD::LASTB, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_rev: + return DAG.getNode(AArch64ISD::REV, dl, Op.getValueType(), + Op.getOperand(1)); + case Intrinsic::aarch64_sve_tbl: + return DAG.getNode(AArch64ISD::TBL, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_trn1: + return DAG.getNode(AArch64ISD::TRN1, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_trn2: + return DAG.getNode(AArch64ISD::TRN2, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_uzp1: + return DAG.getNode(AArch64ISD::UZP1, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_uzp2: + return DAG.getNode(AArch64ISD::UZP2, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_zip1: + return DAG.getNode(AArch64ISD::ZIP1, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); + case Intrinsic::aarch64_sve_zip2: + return DAG.getNode(AArch64ISD::ZIP2, dl, Op.getValueType(), + Op.getOperand(1), Op.getOperand(2)); case Intrinsic::aarch64_sve_insr: { SDValue Scalar = Op.getOperand(2); @@ -10631,6 +10673,31 @@ return SDValue(); } +static SDValue LowerSVEIntrinsicEXT(SDNode *N, SelectionDAG &DAG) { + SDLoc dl(N); + LLVMContext &Ctx = *DAG.getContext(); + EVT VT = N->getValueType(0); + + assert(VT.isScalableVector() && "Expected a scalable vector."); + + // Current lowering only supports the SVE-ACLE types. + if (VT.getSizeInBits() != AArch64::SVEBitsPerBlock) + return SDValue(); + + unsigned ElemSize = VT.getVectorElementType().getSizeInBits() / 8; + unsigned ByteSize = VT.getSizeInBits() / 8; + EVT ByteVT = EVT::getVectorVT(Ctx, MVT::i8, { ByteSize, true }); + + // Convert everything to the domain of EXT (i.e bytes). 
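+  // The index operand counts elements, so it is scaled by the element size
+  // (in bytes) below to match EXT's byte granularity.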
+  SDValue Op0 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(1));
+  SDValue Op1 = DAG.getNode(ISD::BITCAST, dl, ByteVT, N->getOperand(2));
+  SDValue Op2 = DAG.getNode(ISD::MUL, dl, MVT::i32, N->getOperand(3),
+                            DAG.getConstant(ElemSize, dl, MVT::i32));
+
+  SDValue EXT = DAG.getNode(AArch64ISD::EXT, dl, ByteVT, Op0, Op1, Op2);
+  return DAG.getNode(ISD::BITCAST, dl, VT, EXT);
+}
+
 static SDValue tryConvertSVEWideCompare(SDNode *N, unsigned ReplacementIID,
                                         bool Invert,
                                         TargetLowering::DAGCombinerInfo &DCI,
@@ -10770,6 +10837,8 @@
     return LowerSVEIntReduction(N, AArch64ISD::EORV_PRED, DAG);
   case Intrinsic::aarch64_sve_andv:
     return LowerSVEIntReduction(N, AArch64ISD::ANDV_PRED, DAG);
+  case Intrinsic::aarch64_sve_ext:
+    return LowerSVEIntrinsicEXT(N, DAG);
   case Intrinsic::aarch64_sve_cmpeq_wide:
     return tryConvertSVEWideCompare(N, Intrinsic::aarch64_sve_cmpeq, false, DCI,
                                     DAG);
@@ -12527,6 +12596,48 @@
   case ISD::ATOMIC_CMP_SWAP:
     ReplaceCMP_SWAP_128Results(N, Results, DAG, Subtarget);
     return;
+  case ISD::INTRINSIC_WO_CHAIN: {
+    EVT VT = N->getValueType(0);
+    assert((VT == MVT::i8 || VT == MVT::i16) &&
+           "custom lowering for unexpected type");
+
+    ConstantSDNode *CN = cast<ConstantSDNode>(N->getOperand(0));
+    Intrinsic::ID IntID = static_cast<Intrinsic::ID>(CN->getZExtValue());
+    switch (IntID) {
+    default:
+      return;
+    case Intrinsic::aarch64_sve_clasta_n: {
+      SDLoc DL(N);
+      auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
+      auto V = DAG.getNode(AArch64ISD::CLASTA_N, DL, MVT::i32,
+                           N->getOperand(1), Op2, N->getOperand(3));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+      return;
+    }
+    case Intrinsic::aarch64_sve_clastb_n: {
+      SDLoc DL(N);
+      auto Op2 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, N->getOperand(2));
+      auto V = DAG.getNode(AArch64ISD::CLASTB_N, DL, MVT::i32,
+                           N->getOperand(1), Op2, N->getOperand(3));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+      return;
+    }
+    case Intrinsic::aarch64_sve_lasta: {
+      SDLoc DL(N);
+      auto V = DAG.getNode(AArch64ISD::LASTA, DL, MVT::i32,
+                           N->getOperand(1), N->getOperand(2));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+      return;
+    }
+    case Intrinsic::aarch64_sve_lastb: {
+      SDLoc DL(N);
+      auto V = DAG.getNode(AArch64ISD::LASTB, DL, MVT::i32,
+                           N->getOperand(1), N->getOperand(2));
+      Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, V));
+      return;
+    }
+    }
+  }
   }
 }
Index: llvm/lib/Target/AArch64/AArch64InstrInfo.td
===================================================================
--- llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -260,6 +260,10 @@
                                       SDTCisSameAs<1, 2>, SDTCisSameAs<1, 3>,
                                       SDTCisSameAs<1, 4>]>;
+def SDT_AArch64TBL : SDTypeProfile<1, 2, [
+  SDTCisVec<0>, SDTCisSameAs<0, 1>, SDTCisInt<2>
+]>;
+
 // non-extending masked load fragment.
def nonext_masked_load : PatFrag<(ops node:$ptr, node:$pred, node:$def), @@ -517,6 +521,8 @@ def AArch64uunpkhi : SDNode<"AArch64ISD::UUNPKHI", SDT_AArch64unpk>; def AArch64uunpklo : SDNode<"AArch64ISD::UUNPKLO", SDT_AArch64unpk>; +def AArch64tbl : SDNode<"AArch64ISD::TBL", SDT_AArch64TBL>; + //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// Index: llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td =================================================================== --- llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td +++ llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td @@ -45,6 +45,15 @@ def AArch64orv_pred : SDNode<"AArch64ISD::ORV_PRED", SDT_AArch64Reduce>; def AArch64eorv_pred : SDNode<"AArch64ISD::EORV_PRED", SDT_AArch64Reduce>; def AArch64andv_pred : SDNode<"AArch64ISD::ANDV_PRED", SDT_AArch64Reduce>; +def AArch64lasta : SDNode<"AArch64ISD::LASTA", SDT_AArch64Reduce>; +def AArch64lastb : SDNode<"AArch64ISD::LASTB", SDT_AArch64Reduce>; + +def SDT_AArch64ReduceWithInit : SDTypeProfile<1, 3, [SDTCisVec<1>, SDTCisVec<3>]>; +def AArch64clasta_n : SDNode<"AArch64ISD::CLASTA_N", SDT_AArch64ReduceWithInit>; +def AArch64clastb_n : SDNode<"AArch64ISD::CLASTB_N", SDT_AArch64ReduceWithInit>; + +def SDT_AArch64Rev : SDTypeProfile<1, 1, [SDTCisVec<0>, SDTCisSameAs<0,1>]>; +def AArch64rev : SDNode<"AArch64ISD::REV", SDT_AArch64Rev>; let Predicates = [HasSVE] in { @@ -233,19 +242,19 @@ // Select elements from either vector (predicated) defm SEL_ZPZZ : sve_int_sel_vvv<"sel", vselect>; - defm SPLICE_ZPZ : sve_int_perm_splice<"splice">; - defm COMPACT_ZPZ : sve_int_perm_compact<"compact">; + defm SPLICE_ZPZ : sve_int_perm_splice<"splice", int_aarch64_sve_splice>; + defm COMPACT_ZPZ : sve_int_perm_compact<"compact", int_aarch64_sve_compact>; defm INSR_ZR : sve_int_perm_insrs<"insr", AArch64insr>; defm INSR_ZV : sve_int_perm_insrv<"insr", AArch64insr>; - def EXT_ZZI : sve_int_perm_extract_i<"ext">; + def EXT_ZZI : sve_int_perm_extract_i<"ext", AArch64ext>; defm RBIT_ZPmZ : sve_int_perm_rev_rbit<"rbit", int_aarch64_sve_rbit>; defm REVB_ZPmZ : sve_int_perm_rev_revb<"revb", int_aarch64_sve_revb, bswap>; defm REVH_ZPmZ : sve_int_perm_rev_revh<"revh", int_aarch64_sve_revh>; defm REVW_ZPmZ : sve_int_perm_rev_revw<"revw", int_aarch64_sve_revw>; - defm REV_PP : sve_int_perm_reverse_p<"rev">; - defm REV_ZZ : sve_int_perm_reverse_z<"rev">; + defm REV_PP : sve_int_perm_reverse_p<"rev", AArch64rev>; + defm REV_ZZ : sve_int_perm_reverse_z<"rev", AArch64rev>; defm SUNPKLO_ZZ : sve_int_perm_unpk<0b00, "sunpklo", AArch64sunpklo>; defm SUNPKHI_ZZ : sve_int_perm_unpk<0b01, "sunpkhi", AArch64sunpkhi>; @@ -296,17 +305,17 @@ defm NORS_PPzPP : sve_int_pred_log<0b1110, "nors", int_aarch64_sve_nors>; defm NANDS_PPzPP : sve_int_pred_log<0b1111, "nands", int_aarch64_sve_nands>; - defm CLASTA_RPZ : sve_int_perm_clast_rz<0, "clasta">; - defm CLASTB_RPZ : sve_int_perm_clast_rz<1, "clastb">; - defm CLASTA_VPZ : sve_int_perm_clast_vz<0, "clasta">; - defm CLASTB_VPZ : sve_int_perm_clast_vz<1, "clastb">; - defm CLASTA_ZPZ : sve_int_perm_clast_zz<0, "clasta">; - defm CLASTB_ZPZ : sve_int_perm_clast_zz<1, "clastb">; + defm CLASTA_RPZ : sve_int_perm_clast_rz<0, "clasta", AArch64clasta_n>; + defm CLASTB_RPZ : sve_int_perm_clast_rz<1, "clastb", AArch64clastb_n>; + defm CLASTA_VPZ : sve_int_perm_clast_vz<0, "clasta", AArch64clasta_n>; + defm CLASTB_VPZ : sve_int_perm_clast_vz<1, "clastb", AArch64clastb_n>; + defm CLASTA_ZPZ : 
sve_int_perm_clast_zz<0, "clasta", int_aarch64_sve_clasta>; + defm CLASTB_ZPZ : sve_int_perm_clast_zz<1, "clastb", int_aarch64_sve_clastb>; - defm LASTA_RPZ : sve_int_perm_last_r<0, "lasta">; - defm LASTB_RPZ : sve_int_perm_last_r<1, "lastb">; - defm LASTA_VPZ : sve_int_perm_last_v<0, "lasta">; - defm LASTB_VPZ : sve_int_perm_last_v<1, "lastb">; + defm LASTA_RPZ : sve_int_perm_last_r<0, "lasta", AArch64lasta>; + defm LASTB_RPZ : sve_int_perm_last_r<1, "lastb", AArch64lastb>; + defm LASTA_VPZ : sve_int_perm_last_v<0, "lasta", AArch64lasta>; + defm LASTB_VPZ : sve_int_perm_last_v<1, "lastb", AArch64lastb>; // continuous load with reg+immediate defm LD1B_IMM : sve_mem_cld_si<0b0000, "ld1b", Z_b, ZPR8>; @@ -727,21 +736,21 @@ defm ADR_LSL_ZZZ_S : sve_int_bin_cons_misc_0_a_32_lsl<0b10, "adr">; defm ADR_LSL_ZZZ_D : sve_int_bin_cons_misc_0_a_64_lsl<0b11, "adr">; - defm TBL_ZZZ : sve_int_perm_tbl<"tbl">; - - defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1">; - defm ZIP2_ZZZ : sve_int_perm_bin_perm_zz<0b001, "zip2">; - defm UZP1_ZZZ : sve_int_perm_bin_perm_zz<0b010, "uzp1">; - defm UZP2_ZZZ : sve_int_perm_bin_perm_zz<0b011, "uzp2">; - defm TRN1_ZZZ : sve_int_perm_bin_perm_zz<0b100, "trn1">; - defm TRN2_ZZZ : sve_int_perm_bin_perm_zz<0b101, "trn2">; - - defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1">; - defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2">; - defm UZP1_PPP : sve_int_perm_bin_perm_pp<0b010, "uzp1">; - defm UZP2_PPP : sve_int_perm_bin_perm_pp<0b011, "uzp2">; - defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1">; - defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2">; + defm TBL_ZZZ : sve_int_perm_tbl<"tbl", AArch64tbl>; + + defm ZIP1_ZZZ : sve_int_perm_bin_perm_zz<0b000, "zip1", AArch64zip1>; + defm ZIP2_ZZZ : sve_int_perm_bin_perm_zz<0b001, "zip2", AArch64zip2>; + defm UZP1_ZZZ : sve_int_perm_bin_perm_zz<0b010, "uzp1", AArch64uzp1>; + defm UZP2_ZZZ : sve_int_perm_bin_perm_zz<0b011, "uzp2", AArch64uzp2>; + defm TRN1_ZZZ : sve_int_perm_bin_perm_zz<0b100, "trn1", AArch64trn1>; + defm TRN2_ZZZ : sve_int_perm_bin_perm_zz<0b101, "trn2", AArch64trn2>; + + defm ZIP1_PPP : sve_int_perm_bin_perm_pp<0b000, "zip1", AArch64zip1>; + defm ZIP2_PPP : sve_int_perm_bin_perm_pp<0b001, "zip2", AArch64zip2>; + defm UZP1_PPP : sve_int_perm_bin_perm_pp<0b010, "uzp1", AArch64uzp1>; + defm UZP2_PPP : sve_int_perm_bin_perm_pp<0b011, "uzp2", AArch64uzp2>; + defm TRN1_PPP : sve_int_perm_bin_perm_pp<0b100, "trn1", AArch64trn1>; + defm TRN2_PPP : sve_int_perm_bin_perm_pp<0b101, "trn2", AArch64trn2>; defm CMPHS_PPzZZ : sve_int_cmp_0<0b000, "cmphs", int_aarch64_sve_cmphs, SETUGE>; defm CMPHI_PPzZZ : sve_int_cmp_0<0b001, "cmphi", int_aarch64_sve_cmphi, SETUGT>; Index: llvm/lib/Target/AArch64/SVEInstrFormats.td =================================================================== --- llvm/lib/Target/AArch64/SVEInstrFormats.td +++ llvm/lib/Target/AArch64/SVEInstrFormats.td @@ -789,7 +789,7 @@ let Inst{4-0} = Zd; } -multiclass sve_int_perm_tbl { +multiclass sve_int_perm_tbl { def _B : sve_int_perm_tbl<0b00, 0b10, asm, ZPR8, Z_b>; def _H : sve_int_perm_tbl<0b01, 0b10, asm, ZPR16, Z_h>; def _S : sve_int_perm_tbl<0b10, 0b10, asm, ZPR32, Z_s>; @@ -803,6 +803,15 @@ (!cast(NAME # _S) ZPR32:$Zd, ZPR32:$Zn, ZPR32:$Zm), 0>; def : InstAlias(NAME # _D) ZPR64:$Zd, ZPR64:$Zn, ZPR64:$Zm), 0>; + + def : SVE_2_Op_Pat(NAME # _B)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; + + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : 
SVE_2_Op_Pat(NAME # _D)>; } multiclass sve2_int_perm_tbl { @@ -852,11 +861,20 @@ let Inst{4-0} = Zd; } -multiclass sve_int_perm_reverse_z { +multiclass sve_int_perm_reverse_z { def _B : sve_int_perm_reverse_z<0b00, asm, ZPR8>; def _H : sve_int_perm_reverse_z<0b01, asm, ZPR16>; def _S : sve_int_perm_reverse_z<0b10, asm, ZPR32>; def _D : sve_int_perm_reverse_z<0b11, asm, ZPR64>; + + def : SVE_1_Op_Pat(NAME # _B)>; + def : SVE_1_Op_Pat(NAME # _H)>; + def : SVE_1_Op_Pat(NAME # _S)>; + def : SVE_1_Op_Pat(NAME # _D)>; + + def : SVE_1_Op_Pat(NAME # _H)>; + def : SVE_1_Op_Pat(NAME # _S)>; + def : SVE_1_Op_Pat(NAME # _D)>; } class sve_int_perm_reverse_p sz8_64, string asm, PPRRegOp pprty> @@ -874,11 +892,16 @@ let Inst{3-0} = Pd; } -multiclass sve_int_perm_reverse_p { +multiclass sve_int_perm_reverse_p { def _B : sve_int_perm_reverse_p<0b00, asm, PPR8>; def _H : sve_int_perm_reverse_p<0b01, asm, PPR16>; def _S : sve_int_perm_reverse_p<0b10, asm, PPR32>; def _D : sve_int_perm_reverse_p<0b11, asm, PPR64>; + + def : SVE_1_Op_Pat(NAME # _B)>; + def : SVE_1_Op_Pat(NAME # _H)>; + def : SVE_1_Op_Pat(NAME # _S)>; + def : SVE_1_Op_Pat(NAME # _D)>; } class sve_int_perm_unpk sz16_64, bits<2> opc, string asm, @@ -970,10 +993,12 @@ // SVE Permute - Extract Group //===----------------------------------------------------------------------===// -class sve_int_perm_extract_i +class sve_int_perm_extract_i : I<(outs ZPR8:$Zdn), (ins ZPR8:$_Zdn, ZPR8:$Zm, imm0_255:$imm8), asm, "\t$Zdn, $_Zdn, $Zm, $imm8", - "", []>, Sched<[]> { + "", + [(set ZPR8:$Zdn, (nxv16i8 (op (nxv16i8 ZPR8:$_Zdn), (nxv16i8 ZPR8:$Zm), + (imm0_255:$imm8))))]>, Sched<[]> { bits<5> Zdn; bits<5> Zm; bits<8> imm8; @@ -1881,11 +1906,22 @@ let Inst{4-0} = Zd; } -multiclass sve_int_perm_bin_perm_zz opc, string asm> { +multiclass sve_int_perm_bin_perm_zz opc, string asm, + SDPatternOperator op> { def _B : sve_int_perm_bin_perm_zz; def _H : sve_int_perm_bin_perm_zz; def _S : sve_int_perm_bin_perm_zz; def _D : sve_int_perm_bin_perm_zz; + + def : SVE_2_Op_Pat(NAME # _B)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; + + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; } //===----------------------------------------------------------------------===// @@ -4727,11 +4763,17 @@ let Inst{3-0} = Pd; } -multiclass sve_int_perm_bin_perm_pp opc, string asm> { +multiclass sve_int_perm_bin_perm_pp opc, string asm, + SDPatternOperator op> { def _B : sve_int_perm_bin_perm_pp; def _H : sve_int_perm_bin_perm_pp; def _S : sve_int_perm_bin_perm_pp; def _D : sve_int_perm_bin_perm_pp; + + def : SVE_2_Op_Pat(NAME # _B)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; } class sve_int_perm_punpk @@ -4837,11 +4879,16 @@ let Constraints = "$Rdn = $_Rdn"; } -multiclass sve_int_perm_clast_rz { +multiclass sve_int_perm_clast_rz { def _B : sve_int_perm_clast_rz<0b00, ab, asm, ZPR8, GPR32>; def _H : sve_int_perm_clast_rz<0b01, ab, asm, ZPR16, GPR32>; def _S : sve_int_perm_clast_rz<0b10, ab, asm, ZPR32, GPR32>; def _D : sve_int_perm_clast_rz<0b11, ab, asm, ZPR64, GPR64>; + + def : SVE_3_Op_Pat(NAME # _B)>; + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; } class sve_int_perm_clast_vz sz8_64, bit ab, string asm, @@ -4865,11 +4912,15 @@ let Constraints = "$Vdn = $_Vdn"; } -multiclass sve_int_perm_clast_vz { +multiclass sve_int_perm_clast_vz { 
def _B : sve_int_perm_clast_vz<0b00, ab, asm, ZPR8, FPR8>; def _H : sve_int_perm_clast_vz<0b01, ab, asm, ZPR16, FPR16>; def _S : sve_int_perm_clast_vz<0b10, ab, asm, ZPR32, FPR32>; def _D : sve_int_perm_clast_vz<0b11, ab, asm, ZPR64, FPR64>; + + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; } class sve_int_perm_clast_zz sz8_64, bit ab, string asm, @@ -4895,11 +4946,20 @@ let ElementSize = ElementSizeNone; } -multiclass sve_int_perm_clast_zz { +multiclass sve_int_perm_clast_zz { def _B : sve_int_perm_clast_zz<0b00, ab, asm, ZPR8>; def _H : sve_int_perm_clast_zz<0b01, ab, asm, ZPR16>; def _S : sve_int_perm_clast_zz<0b10, ab, asm, ZPR32>; def _D : sve_int_perm_clast_zz<0b11, ab, asm, ZPR64>; + + def : SVE_3_Op_Pat(NAME # _B)>; + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; + + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; } class sve_int_perm_last_r sz8_64, bit ab, string asm, @@ -4921,11 +4981,16 @@ let Inst{4-0} = Rd; } -multiclass sve_int_perm_last_r { +multiclass sve_int_perm_last_r { def _B : sve_int_perm_last_r<0b00, ab, asm, ZPR8, GPR32>; def _H : sve_int_perm_last_r<0b01, ab, asm, ZPR16, GPR32>; def _S : sve_int_perm_last_r<0b10, ab, asm, ZPR32, GPR32>; def _D : sve_int_perm_last_r<0b11, ab, asm, ZPR64, GPR64>; + + def : SVE_2_Op_Pat(NAME # _B)>; + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; } class sve_int_perm_last_v sz8_64, bit ab, string asm, @@ -4947,11 +5012,16 @@ let Inst{4-0} = Vd; } -multiclass sve_int_perm_last_v { +multiclass sve_int_perm_last_v { def _B : sve_int_perm_last_v<0b00, ab, asm, ZPR8, FPR8>; def _H : sve_int_perm_last_v<0b01, ab, asm, ZPR16, FPR16>; def _S : sve_int_perm_last_v<0b10, ab, asm, ZPR32, FPR32>; def _D : sve_int_perm_last_v<0b11, ab, asm, ZPR64, FPR64>; + + def : SVE_2_Op_Pat(NAME # _H)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; } class sve_int_perm_splice sz8_64, string asm, ZPRRegOp zprty> @@ -4974,11 +5044,20 @@ let ElementSize = ElementSizeNone; } -multiclass sve_int_perm_splice { +multiclass sve_int_perm_splice { def _B : sve_int_perm_splice<0b00, asm, ZPR8>; def _H : sve_int_perm_splice<0b01, asm, ZPR16>; def _S : sve_int_perm_splice<0b10, asm, ZPR32>; def _D : sve_int_perm_splice<0b11, asm, ZPR64>; + + def : SVE_3_Op_Pat(NAME # _B)>; + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; + + def : SVE_3_Op_Pat(NAME # _H)>; + def : SVE_3_Op_Pat(NAME # _S)>; + def : SVE_3_Op_Pat(NAME # _D)>; } class sve2_int_perm_splice_cons sz8_64, string asm, @@ -5160,9 +5239,14 @@ let Inst{4-0} = Zd; } -multiclass sve_int_perm_compact { +multiclass sve_int_perm_compact { def _S : sve_int_perm_compact<0b0, asm, ZPR32>; def _D : sve_int_perm_compact<0b1, asm, ZPR64>; + + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _S)>; + def : SVE_2_Op_Pat(NAME # _D)>; + def : SVE_2_Op_Pat(NAME # _D)>; } Index: llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll =================================================================== --- llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll +++ llvm/test/CodeGen/AArch64/sve-intrinsics-perm-select.ll @@ -1,6 +1,734 @@ ; RUN: llc -mtriple=aarch64-linux-gnu -mattr=+sve < %s | FileCheck %s ; +; CLASTA (Vectors) +; + +define @clasta_i8( %pg, %a, %b) { +; CHECK-LABEL: clasta_i8: +; CHECK: clasta 
z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @clasta_i16( %pg, %a, %b) { +; CHECK-LABEL: clasta_i16: +; CHECK: clasta z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @clasta_i32( %pg, %a, %b) { +; CHECK-LABEL: clasta_i32: +; CHECK: clasta z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @clasta_i64( %pg, %a, %b) { +; CHECK-LABEL: clasta_i64: +; CHECK: clasta z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @clasta_f16( %pg, %a, %b) { +; CHECK-LABEL: clasta_f16: +; CHECK: clasta z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv8f16( %pg, + %a, + %b) + ret %out +} + +define @clasta_f32( %pg, %a, %b) { +; CHECK-LABEL: clasta_f32: +; CHECK: clasta z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv4f32( %pg, + %a, + %b) + ret %out +} + +define @clasta_f64( %pg, %a, %b) { +; CHECK-LABEL: clasta_f64: +; CHECK: clasta z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clasta.nxv2f64( %pg, + %a, + %b) + ret %out +} + +; +; CLASTA (Scalar) +; + +define i8 @clasta_n_i8( %pg, i8 %a, %b) { +; CHECK-LABEL: clasta_n_i8: +; CHECK: clasta w0, p0, w0, z0.b +; CHECK-NEXT: ret + %out = call i8 @llvm.aarch64.sve.clasta.n.nxv16i8( %pg, + i8 %a, + %b) + ret i8 %out +} + +define i16 @clasta_n_i16( %pg, i16 %a, %b) { +; CHECK-LABEL: clasta_n_i16: +; CHECK: clasta w0, p0, w0, z0.h +; CHECK-NEXT: ret + %out = call i16 @llvm.aarch64.sve.clasta.n.nxv8i16( %pg, + i16 %a, + %b) + ret i16 %out +} + +define i32 @clasta_n_i32( %pg, i32 %a, %b) { +; CHECK-LABEL: clasta_n_i32: +; CHECK: clasta w0, p0, w0, z0.s +; CHECK-NEXT: ret + %out = call i32 @llvm.aarch64.sve.clasta.n.nxv4i32( %pg, + i32 %a, + %b) + ret i32 %out +} + +define i64 @clasta_n_i64( %pg, i64 %a, %b) { +; CHECK-LABEL: clasta_n_i64: +; CHECK: clasta x0, p0, x0, z0.d +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.clasta.n.nxv2i64( %pg, + i64 %a, + %b) + ret i64 %out +} + +define half @clasta_n_f16( %pg, half %a, %b) { +; CHECK-LABEL: clasta_n_f16: +; CHECK: clasta h0, p0, h0, z1.h +; CHECK-NEXT: ret + %out = call half @llvm.aarch64.sve.clasta.n.nxv8f16( %pg, + half %a, + %b) + ret half %out +} + +define float @clasta_n_f32( %pg, float %a, %b) { +; CHECK-LABEL: clasta_n_f32: +; CHECK: clasta s0, p0, s0, z1.s +; CHECK-NEXT: ret + %out = call float @llvm.aarch64.sve.clasta.n.nxv4f32( %pg, + float %a, + %b) + ret float %out +} + +define double @clasta_n_f64( %pg, double %a, %b) { +; CHECK-LABEL: clasta_n_f64: +; CHECK: clasta d0, p0, d0, z1.d +; CHECK-NEXT: ret + %out = call double @llvm.aarch64.sve.clasta.n.nxv2f64( %pg, + double %a, + %b) + ret double %out +} + +; +; CLASTB (Vectors) +; + +define @clastb_i8( %pg, %a, %b) { +; CHECK-LABEL: clastb_i8: +; CHECK: clastb z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @clastb_i16( %pg, %a, %b) { +; CHECK-LABEL: clastb_i16: +; CHECK: clastb z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @clastb_i32( %pg, %a, %b) { +; CHECK-LABEL: clastb_i32: +; CHECK: clastb z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv4i32( %pg, 
+ %a, + %b) + ret %out +} + +define @clastb_i64( %pg, %a, %b) { +; CHECK-LABEL: clastb_i64: +; CHECK: clastb z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @clastb_f16( %pg, %a, %b) { +; CHECK-LABEL: clastb_f16: +; CHECK: clastb z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv8f16( %pg, + %a, + %b) + ret %out +} + +define @clastb_f32( %pg, %a, %b) { +; CHECK-LABEL: clastb_f32: +; CHECK: clastb z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv4f32( %pg, + %a, + %b) + ret %out +} + +define @clastb_f64( %pg, %a, %b) { +; CHECK-LABEL: clastb_f64: +; CHECK: clastb z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.clastb.nxv2f64( %pg, + %a, + %b) + ret %out +} + +; +; CLASTB (Scalar) +; + +define i8 @clastb_n_i8( %pg, i8 %a, %b) { +; CHECK-LABEL: clastb_n_i8: +; CHECK: clastb w0, p0, w0, z0.b +; CHECK-NEXT: ret + %out = call i8 @llvm.aarch64.sve.clastb.n.nxv16i8( %pg, + i8 %a, + %b) + ret i8 %out +} + +define i16 @clastb_n_i16( %pg, i16 %a, %b) { +; CHECK-LABEL: clastb_n_i16: +; CHECK: clastb w0, p0, w0, z0.h +; CHECK-NEXT: ret + %out = call i16 @llvm.aarch64.sve.clastb.n.nxv8i16( %pg, + i16 %a, + %b) + ret i16 %out +} + +define i32 @clastb_n_i32( %pg, i32 %a, %b) { +; CHECK-LABEL: clastb_n_i32: +; CHECK: clastb w0, p0, w0, z0.s +; CHECK-NEXT: ret + %out = call i32 @llvm.aarch64.sve.clastb.n.nxv4i32( %pg, + i32 %a, + %b) + ret i32 %out +} + +define i64 @clastb_n_i64( %pg, i64 %a, %b) { +; CHECK-LABEL: clastb_n_i64: +; CHECK: clastb x0, p0, x0, z0.d +; CHECK-NEXT: ret + %out = call i64 @llvm.aarch64.sve.clastb.n.nxv2i64( %pg, + i64 %a, + %b) + ret i64 %out +} + +define half @clastb_n_f16( %pg, half %a, %b) { +; CHECK-LABEL: clastb_n_f16: +; CHECK: clastb h0, p0, h0, z1.h +; CHECK-NEXT: ret + %out = call half @llvm.aarch64.sve.clastb.n.nxv8f16( %pg, + half %a, + %b) + ret half %out +} + +define float @clastb_n_f32( %pg, float %a, %b) { +; CHECK-LABEL: clastb_n_f32: +; CHECK: clastb s0, p0, s0, z1.s +; CHECK-NEXT: ret + %out = call float @llvm.aarch64.sve.clastb.n.nxv4f32( %pg, + float %a, + %b) + ret float %out +} + +define double @clastb_n_f64( %pg, double %a, %b) { +; CHECK-LABEL: clastb_n_f64: +; CHECK: clastb d0, p0, d0, z1.d +; CHECK-NEXT: ret + %out = call double @llvm.aarch64.sve.clastb.n.nxv2f64( %pg, + double %a, + %b) + ret double %out +} + +; +; EXT +; + +define @ext_i8( %a, %b) { +; CHECK-LABEL: ext_i8: +; CHECK: ext z0.b, z0.b, z1.b, #255 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv16i8( %a, + %b, + i32 255) + ret %out +} + +define @ext_i16( %a, %b) { +; CHECK-LABEL: ext_i16: +; CHECK: ext z0.b, z0.b, z1.b, #0 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv8i16( %a, + %b, + i32 0) + ret %out +} + +define @ext_i32( %a, %b) { +; CHECK-LABEL: ext_i32: +; CHECK: ext z0.b, z0.b, z1.b, #4 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv4i32( %a, + %b, + i32 1) + ret %out +} + +define @ext_i64( %a, %b) { +; CHECK-LABEL: ext_i64: +; CHECK: ext z0.b, z0.b, z1.b, #16 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv2i64( %a, + %b, + i32 2) + ret %out +} + +define @ext_f16( %a, %b) { +; CHECK-LABEL: ext_f16: +; CHECK: ext z0.b, z0.b, z1.b, #6 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv8f16( %a, + %b, + i32 3) + ret %out +} + +define @ext_f32( %a, %b) { +; CHECK-LABEL: ext_f32: +; CHECK: ext z0.b, z0.b, z1.b, #16 +; CHECK-NEXT: ret + %out = call 
@llvm.aarch64.sve.ext.nxv4f32( %a, + %b, + i32 4) + ret %out +} + +define @ext_f64( %a, %b) { +; CHECK-LABEL: ext_f64: +; CHECK: ext z0.b, z0.b, z1.b, #40 +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.ext.nxv2f64( %a, + %b, + i32 5) + ret %out +} + +; +; LASTA +; + +define i8 @lasta_i8( %pg, %a) { +; CHECK-LABEL: lasta_i8 +; CHECK: lasta w0, p0, z0.b +; CHECK-NEXT: ret + %res = call i8 @llvm.aarch64.sve.lasta.nxv16i8( %pg, + %a) + ret i8 %res +} + +define i16 @lasta_i16( %pg, %a) { +; CHECK-LABEL: lasta_i16 +; CHECK: lasta w0, p0, z0.h +; CHECK-NEXT: ret + %res = call i16 @llvm.aarch64.sve.lasta.nxv8i16( %pg, + %a) + ret i16 %res +} + +define i32 @lasta_i32( %pg, %a) { +; CHECK-LABEL: lasta_i32 +; CHECK: lasta w0, p0, z0.s +; CHECK-NEXT: ret + %res = call i32 @llvm.aarch64.sve.lasta.nxv4i32( %pg, + %a) + ret i32 %res +} + +define i64 @lasta_i64( %pg, %a) { +; CHECK-LABEL: lasta_i64 +; CHECK: lasta x0, p0, z0.d +; CHECK-NEXT: ret + %res = call i64 @llvm.aarch64.sve.lasta.nxv2i64( %pg, + %a) + ret i64 %res +} + +define half @lasta_f16( %pg, %a) { +; CHECK-LABEL: lasta_f16 +; CHECK: lasta h0, p0, z0.h +; CHECK-NEXT: ret + %res = call half @llvm.aarch64.sve.lasta.nxv8f16( %pg, + %a) + ret half %res +} + +define float @lasta_f32( %pg, %a) { +; CHECK-LABEL: lasta_f32 +; CHECK: lasta s0, p0, z0.s +; CHECK-NEXT: ret + %res = call float @llvm.aarch64.sve.lasta.nxv4f32( %pg, + %a) + ret float %res +} + +define float @lasta_f32_v2( %pg, %a) { +; CHECK-LABEL: lasta_f32_v2 +; CHECK: lasta s0, p0, z0.s +; CHECK-NEXT: ret + %res = call float @llvm.aarch64.sve.lasta.nxv2f32( %pg, + %a) + ret float %res +} + +define double @lasta_f64( %pg, %a) { +; CHECK-LABEL: lasta_f64 +; CHECK: lasta d0, p0, z0.d +; CHECK-NEXT: ret + %res = call double @llvm.aarch64.sve.lasta.nxv2f64( %pg, + %a) + ret double %res +} + +; +; LASTB +; + +define i8 @lastb_i8( %pg, %a) { +; CHECK-LABEL: lastb_i8 +; CHECK: lastb w0, p0, z0.b +; CHECK-NEXT: ret + %res = call i8 @llvm.aarch64.sve.lastb.nxv16i8( %pg, + %a) + ret i8 %res +} + +define i16 @lastb_i16( %pg, %a) { +; CHECK-LABEL: lastb_i16 +; CHECK: lastb w0, p0, z0.h +; CHECK-NEXT: ret + %res = call i16 @llvm.aarch64.sve.lastb.nxv8i16( %pg, + %a) + ret i16 %res +} + +define i32 @lastb_i32( %pg, %a) { +; CHECK-LABEL: lastb_i32 +; CHECK: lastb w0, p0, z0.s +; CHECK-NEXT: ret + %res = call i32 @llvm.aarch64.sve.lastb.nxv4i32( %pg, + %a) + ret i32 %res +} + +define i64 @lastb_i64( %pg, %a) { +; CHECK-LABEL: lastb_i64 +; CHECK: lastb x0, p0, z0.d +; CHECK-NEXT: ret + %res = call i64 @llvm.aarch64.sve.lastb.nxv2i64( %pg, + %a) + ret i64 %res +} + +define half @lastb_f16( %pg, %a) { +; CHECK-LABEL: lastb_f16 +; CHECK: lastb h0, p0, z0.h +; CHECK-NEXT: ret + %res = call half @llvm.aarch64.sve.lastb.nxv8f16( %pg, + %a) + ret half %res +} + +define float @lastb_f32( %pg, %a) { +; CHECK-LABEL: lastb_f32 +; CHECK: lastb s0, p0, z0.s +; CHECK-NEXT: ret + %res = call float @llvm.aarch64.sve.lastb.nxv4f32( %pg, + %a) + ret float %res +} + +define float @lastb_f32_v2( %pg, %a) { +; CHECK-LABEL: lastb_f32_v2 +; CHECK: lastb s0, p0, z0.s +; CHECK-NEXT: ret + %res = call float @llvm.aarch64.sve.lastb.nxv2f32( %pg, + %a) + ret float %res +} + +define double @lastb_f64( %pg, %a) { +; CHECK-LABEL: lastb_f64 +; CHECK: lastb d0, p0, z0.d +; CHECK-NEXT: ret + %res = call double @llvm.aarch64.sve.lastb.nxv2f64( %pg, + %a) + ret double %res +} + +; +; COMPACT +; + +define @compact_i32( %pg, %a) { +; CHECK-LABEL: compact_i32: +; CHECK: compact z0.s, p0, z0.s +; CHECK-NEXT: ret + %out = call 
@llvm.aarch64.sve.compact.nxv4i32( %pg, + %a) + ret %out +} + +define @compact_i64( %pg, %a) { +; CHECK-LABEL: compact_i64: +; CHECK: compact z0.d, p0, z0.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.compact.nxv2i64( %pg, + %a) + ret %out +} + +define @compact_f32( %pg, %a) { +; CHECK-LABEL: compact_f32: +; CHECK: compact z0.s, p0, z0.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.compact.nxv4f32( %pg, + %a) + ret %out +} + +define @compact_f64( %pg, %a) { +; CHECK-LABEL: compact_f64: +; CHECK: compact z0.d, p0, z0.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.compact.nxv2f64( %pg, + %a) + ret %out +} + +; +; REV +; + +define @rev_b8( %a) { +; CHECK-LABEL: rev_b8 +; CHECK: rev p0.b, p0.b +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv16i1( %a) + ret %res +} + +define @rev_b16( %a) { +; CHECK-LABEL: rev_b16 +; CHECK: rev p0.h, p0.h +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv8i1( %a) + ret %res +} + +define @rev_b32( %a) { +; CHECK-LABEL: rev_b32 +; CHECK: rev p0.s, p0.s +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv4i1( %a) + ret %res +} + +define @rev_b64( %a) { +; CHECK-LABEL: rev_b64 +; CHECK: rev p0.d, p0.d +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv2i1( %a) + ret %res +} + +define @rev_i8( %a) { +; CHECK-LABEL: rev_i8 +; CHECK: rev z0.b, z0.b +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv16i8( %a) + ret %res +} + +define @rev_i16( %a) { +; CHECK-LABEL: rev_i16 +; CHECK: rev z0.h, z0.h +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv8i16( %a) + ret %res +} + +define @rev_i32( %a) { +; CHECK-LABEL: rev_i32 +; CHECK: rev z0.s, z0.s +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv4i32( %a) + ret %res +} + +define @rev_i64( %a) { +; CHECK-LABEL: rev_i64 +; CHECK: rev z0.d, z0.d +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv2i64( %a) + ret %res +} + +define @rev_f16( %a) { +; CHECK-LABEL: rev_f16 +; CHECK: rev z0.h, z0.h +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv8f16( %a) + ret %res +} + +define @rev_f32( %a) { +; CHECK-LABEL: rev_f32 +; CHECK: rev z0.s, z0.s +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv4f32( %a) + ret %res +} + +define @rev_f64( %a) { +; CHECK-LABEL: rev_f64 +; CHECK: rev z0.d, z0.d +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.rev.nxv2f64( %a) + ret %res +} + +; +; SPLICE +; + +define @splice_i8( %pg, %a, %b) { +; CHECK-LABEL: splice_i8: +; CHECK: splice z0.b, p0, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv16i8( %pg, + %a, + %b) + ret %out +} + +define @splice_i16( %pg, %a, %b) { +; CHECK-LABEL: splice_i16: +; CHECK: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv8i16( %pg, + %a, + %b) + ret %out +} + +define @splice_i32( %pg, %a, %b) { +; CHECK-LABEL: splice_i32: +; CHECK: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv4i32( %pg, + %a, + %b) + ret %out +} + +define @splice_i64( %pg, %a, %b) { +; CHECK-LABEL: splice_i64: +; CHECK: splice z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv2i64( %pg, + %a, + %b) + ret %out +} + +define @splice_f16( %pg, %a, %b) { +; CHECK-LABEL: splice_f16: +; CHECK: splice z0.h, p0, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv8f16( %pg, + %a, + %b) + ret %out +} + +define @splice_f32( %pg, %a, %b) { +; CHECK-LABEL: splice_f32: +; CHECK: splice z0.s, p0, z0.s, z1.s +; CHECK-NEXT: ret + %out = call 
@llvm.aarch64.sve.splice.nxv4f32( %pg, + %a, + %b) + ret %out +} + +define @splice_f64( %pg, %a, %b) { +; CHECK-LABEL: splice_f64: +; CHECK: splice z0.d, p0, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.splice.nxv2f64( %pg, + %a, + %b) + ret %out +} + +; ; SUNPKHI ; @@ -48,12 +776,79 @@ ret %res } -define @sunpklo_i64( %a) { -; CHECK-LABEL: sunpklo_i64 -; CHECK: sunpklo z0.d, z0.s +define @sunpklo_i64( %a) { +; CHECK-LABEL: sunpklo_i64 +; CHECK: sunpklo z0.d, z0.s +; CHECK-NEXT: ret + %res = call @llvm.aarch64.sve.sunpklo.nxv2i64( %a) + ret %res +} + +; +; TBL +; + +define @tbl_i8( %a, %b) { +; CHECK-LABEL: tbl_i8: +; CHECK: tbl z0.b, { z0.b }, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv16i8( %a, + %b) + ret %out +} + +define @tbl_i16( %a, %b) { +; CHECK-LABEL: tbl_i16: +; CHECK: tbl z0.h, { z0.h }, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv8i16( %a, + %b) + ret %out +} + +define @tbl_i32( %a, %b) { +; CHECK-LABEL: tbl_i32: +; CHECK: tbl z0.s, { z0.s }, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv4i32( %a, + %b) + ret %out +} + +define @tbl_i64( %a, %b) { +; CHECK-LABEL: tbl_i64: +; CHECK: tbl z0.d, { z0.d }, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv2i64( %a, + %b) + ret %out +} + +define @tbl_f16( %a, %b) { +; CHECK-LABEL: tbl_f16: +; CHECK: tbl z0.h, { z0.h }, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv8f16( %a, + %b) + ret %out +} + +define @tbl_f32( %a, %b) { +; CHECK-LABEL: tbl_f32: +; CHECK: tbl z0.s, { z0.s }, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.tbl.nxv4f32( %a, + %b) + ret %out +} + +define @tbl_f64( %a, %b) { +; CHECK-LABEL: tbl_f64: +; CHECK: tbl z0.d, { z0.d }, z1.d ; CHECK-NEXT: ret - %res = call @llvm.aarch64.sve.sunpklo.nxv2i64( %a) - ret %res + %out = call @llvm.aarch64.sve.tbl.nxv2f64( %a, + %b) + ret %out } ; @@ -112,6 +907,761 @@ ret %res } +; +; TRN1 +; + +define @trn1_b8( %a, %b) { +; CHECK-LABEL: trn1_b8: +; CHECK: trn1 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv16i1( %a, + %b) + ret %out +} + +define @trn1_b16( %a, %b) { +; CHECK-LABEL: trn1_b16: +; CHECK: trn1 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv8i1( %a, + %b) + ret %out +} + +define @trn1_b32( %a, %b) { +; CHECK-LABEL: trn1_b32: +; CHECK: trn1 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv4i1( %a, + %b) + ret %out +} + +define @trn1_b64( %a, %b) { +; CHECK-LABEL: trn1_b64: +; CHECK: trn1 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv2i1( %a, + %b) + ret %out +} + +define @trn1_i8( %a, %b) { +; CHECK-LABEL: trn1_i8: +; CHECK: trn1 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv16i8( %a, + %b) + ret %out +} + +define @trn1_i16( %a, %b) { +; CHECK-LABEL: trn1_i16: +; CHECK: trn1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv8i16( %a, + %b) + ret %out +} + +define @trn1_i32( %a, %b) { +; CHECK-LABEL: trn1_i32: +; CHECK: trn1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv4i32( %a, + %b) + ret %out +} + +define @trn1_i64( %a, %b) { +; CHECK-LABEL: trn1_i64: +; CHECK: trn1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv2i64( %a, + %b) + ret %out +} + +define @trn1_f16_v4( %a, %b) { +; CHECK-LABEL: trn1_f16_v4: +; CHECK: trn1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv4f16( %a, + %b) + ret %out +} + +define 
@trn1_f16( %a, %b) { +; CHECK-LABEL: trn1_f16: +; CHECK: trn1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv8f16( %a, + %b) + ret %out +} + +define @trn1_f32( %a, %b) { +; CHECK-LABEL: trn1_f32: +; CHECK: trn1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv4f32( %a, + %b) + ret %out +} + +define @trn1_f64( %a, %b) { +; CHECK-LABEL: trn1_f64: +; CHECK: trn1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn1.nxv2f64( %a, + %b) + ret %out +} + +; +; TRN2 +; + +define @trn2_b8( %a, %b) { +; CHECK-LABEL: trn2_b8: +; CHECK: trn2 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv16i1( %a, + %b) + ret %out +} + +define @trn2_b16( %a, %b) { +; CHECK-LABEL: trn2_b16: +; CHECK: trn2 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv8i1( %a, + %b) + ret %out +} + +define @trn2_b32( %a, %b) { +; CHECK-LABEL: trn2_b32: +; CHECK: trn2 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv4i1( %a, + %b) + ret %out +} + +define @trn2_b64( %a, %b) { +; CHECK-LABEL: trn2_b64: +; CHECK: trn2 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv2i1( %a, + %b) + ret %out +} + +define @trn2_i8( %a, %b) { +; CHECK-LABEL: trn2_i8: +; CHECK: trn2 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv16i8( %a, + %b) + ret %out +} + +define @trn2_i16( %a, %b) { +; CHECK-LABEL: trn2_i16: +; CHECK: trn2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv8i16( %a, + %b) + ret %out +} + +define @trn2_i32( %a, %b) { +; CHECK-LABEL: trn2_i32: +; CHECK: trn2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv4i32( %a, + %b) + ret %out +} + +define @trn2_i64( %a, %b) { +; CHECK-LABEL: trn2_i64: +; CHECK: trn2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv2i64( %a, + %b) + ret %out +} + +define @trn2_f16_v4( %a, %b) { +; CHECK-LABEL: trn2_f16_v4: +; CHECK: trn2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv4f16( %a, + %b) + ret %out +} + +define @trn2_f16( %a, %b) { +; CHECK-LABEL: trn2_f16: +; CHECK: trn2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv8f16( %a, + %b) + ret %out +} + +define @trn2_f32( %a, %b) { +; CHECK-LABEL: trn2_f32: +; CHECK: trn2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv4f32( %a, + %b) + ret %out +} + +define @trn2_f64( %a, %b) { +; CHECK-LABEL: trn2_f64: +; CHECK: trn2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.trn2.nxv2f64( %a, + %b) + ret %out +} + +; +; UZP1 +; + +define @uzp1_b8( %a, %b) { +; CHECK-LABEL: uzp1_b8: +; CHECK: uzp1 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv16i1( %a, + %b) + ret %out +} + +define @uzp1_b16( %a, %b) { +; CHECK-LABEL: uzp1_b16: +; CHECK: uzp1 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv8i1( %a, + %b) + ret %out +} + +define @uzp1_b32( %a, %b) { +; CHECK-LABEL: uzp1_b32: +; CHECK: uzp1 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv4i1( %a, + %b) + ret %out +} + +define @uzp1_b64( %a, %b) { +; CHECK-LABEL: uzp1_b64: +; CHECK: uzp1 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv2i1( %a, + %b) + ret %out +} + +define @uzp1_i8( %a, %b) { +; CHECK-LABEL: uzp1_i8: +; CHECK: uzp1 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv16i8( %a, 
+ %b) + ret %out +} + +define @uzp1_i16( %a, %b) { +; CHECK-LABEL: uzp1_i16: +; CHECK: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv8i16( %a, + %b) + ret %out +} + +define @uzp1_i32( %a, %b) { +; CHECK-LABEL: uzp1_i32: +; CHECK: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv4i32( %a, + %b) + ret %out +} + +define @uzp1_i64( %a, %b) { +; CHECK-LABEL: uzp1_i64: +; CHECK: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv2i64( %a, + %b) + ret %out +} + +define @uzp1_f16_v4( %a, %b) { +; CHECK-LABEL: uzp1_f16_v4: +; CHECK: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv4f16( %a, + %b) + ret %out +} + +define @uzp1_f16( %a, %b) { +; CHECK-LABEL: uzp1_f16: +; CHECK: uzp1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv8f16( %a, + %b) + ret %out +} + +define @uzp1_f32( %a, %b) { +; CHECK-LABEL: uzp1_f32: +; CHECK: uzp1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv4f32( %a, + %b) + ret %out +} + +define @uzp1_f64( %a, %b) { +; CHECK-LABEL: uzp1_f64: +; CHECK: uzp1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp1.nxv2f64( %a, + %b) + ret %out +} + +; +; UZP2 +; + +define @uzp2_b8( %a, %b) { +; CHECK-LABEL: uzp2_b8: +; CHECK: uzp2 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv16i1( %a, + %b) + ret %out +} + +define @uzp2_b16( %a, %b) { +; CHECK-LABEL: uzp2_b16: +; CHECK: uzp2 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv8i1( %a, + %b) + ret %out +} + +define @uzp2_b32( %a, %b) { +; CHECK-LABEL: uzp2_b32: +; CHECK: uzp2 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv4i1( %a, + %b) + ret %out +} + +define @uzp2_b64( %a, %b) { +; CHECK-LABEL: uzp2_b64: +; CHECK: uzp2 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv2i1( %a, + %b) + ret %out +} + +define @uzp2_i8( %a, %b) { +; CHECK-LABEL: uzp2_i8: +; CHECK: uzp2 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv16i8( %a, + %b) + ret %out +} + +define @uzp2_i16( %a, %b) { +; CHECK-LABEL: uzp2_i16: +; CHECK: uzp2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv8i16( %a, + %b) + ret %out +} + +define @uzp2_i32( %a, %b) { +; CHECK-LABEL: uzp2_i32: +; CHECK: uzp2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv4i32( %a, + %b) + ret %out +} + +define @uzp2_i64( %a, %b) { +; CHECK-LABEL: uzp2_i64: +; CHECK: uzp2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv2i64( %a, + %b) + ret %out +} + +define @uzp2_f16_v4( %a, %b) { +; CHECK-LABEL: uzp2_f16_v4: +; CHECK: uzp2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv4f16( %a, + %b) + ret %out +} + +define @uzp2_f16( %a, %b) { +; CHECK-LABEL: uzp2_f16: +; CHECK: uzp2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv8f16( %a, + %b) + ret %out +} + +define @uzp2_f32( %a, %b) { +; CHECK-LABEL: uzp2_f32: +; CHECK: uzp2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv4f32( %a, + %b) + ret %out +} + +define @uzp2_f64( %a, %b) { +; CHECK-LABEL: uzp2_f64: +; CHECK: uzp2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.uzp2.nxv2f64( %a, + %b) + ret %out +} + +; +; ZIP1 +; + +define @zip1_b8( %a, %b) { +; CHECK-LABEL: zip1_b8: +; CHECK: zip1 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = 
call @llvm.aarch64.sve.zip1.nxv16i1( %a, + %b) + ret %out +} + +define @zip1_b16( %a, %b) { +; CHECK-LABEL: zip1_b16: +; CHECK: zip1 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv8i1( %a, + %b) + ret %out +} + +define @zip1_b32( %a, %b) { +; CHECK-LABEL: zip1_b32: +; CHECK: zip1 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv4i1( %a, + %b) + ret %out +} + +define @zip1_b64( %a, %b) { +; CHECK-LABEL: zip1_b64: +; CHECK: zip1 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv2i1( %a, + %b) + ret %out +} + +define @zip1_i8( %a, %b) { +; CHECK-LABEL: zip1_i8: +; CHECK: zip1 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv16i8( %a, + %b) + ret %out +} + +define @zip1_i16( %a, %b) { +; CHECK-LABEL: zip1_i16: +; CHECK: zip1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv8i16( %a, + %b) + ret %out +} + +define @zip1_i32( %a, %b) { +; CHECK-LABEL: zip1_i32: +; CHECK: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv4i32( %a, + %b) + ret %out +} + +define @zip1_i64( %a, %b) { +; CHECK-LABEL: zip1_i64: +; CHECK: zip1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv2i64( %a, + %b) + ret %out +} + +define @zip1_f16_v4( %a, %b) { +; CHECK-LABEL: zip1_f16_v4: +; CHECK: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv4f16( %a, + %b) + ret %out +} + +define @zip1_f16( %a, %b) { +; CHECK-LABEL: zip1_f16: +; CHECK: zip1 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv8f16( %a, + %b) + ret %out +} + +define @zip1_f32( %a, %b) { +; CHECK-LABEL: zip1_f32: +; CHECK: zip1 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv4f32( %a, + %b) + ret %out +} + +define @zip1_f64( %a, %b) { +; CHECK-LABEL: zip1_f64: +; CHECK: zip1 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip1.nxv2f64( %a, + %b) + ret %out +} + +; +; ZIP2 +; + +define @zip2_b8( %a, %b) { +; CHECK-LABEL: zip2_b8: +; CHECK: zip2 p0.b, p0.b, p1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv16i1( %a, + %b) + ret %out +} + +define @zip2_b16( %a, %b) { +; CHECK-LABEL: zip2_b16: +; CHECK: zip2 p0.h, p0.h, p1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv8i1( %a, + %b) + ret %out +} + +define @zip2_b32( %a, %b) { +; CHECK-LABEL: zip2_b32: +; CHECK: zip2 p0.s, p0.s, p1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv4i1( %a, + %b) + ret %out +} + +define @zip2_b64( %a, %b) { +; CHECK-LABEL: zip2_b64: +; CHECK: zip2 p0.d, p0.d, p1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv2i1( %a, + %b) + ret %out +} + +define @zip2_i8( %a, %b) { +; CHECK-LABEL: zip2_i8: +; CHECK: zip2 z0.b, z0.b, z1.b +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv16i8( %a, + %b) + ret %out +} + +define @zip2_i16( %a, %b) { +; CHECK-LABEL: zip2_i16: +; CHECK: zip2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv8i16( %a, + %b) + ret %out +} + +define @zip2_i32( %a, %b) { +; CHECK-LABEL: zip2_i32: +; CHECK: zip2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv4i32( %a, + %b) + ret %out +} + +define @zip2_i64( %a, %b) { +; CHECK-LABEL: zip2_i64: +; CHECK: zip2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv2i64( %a, + %b) + ret %out +} + +define @zip2_f16_v4( %a, %b) { +; CHECK-LABEL: zip2_f16_v4: +; CHECK: zip2 z0.s, z0.s, z1.s +; 
CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv4f16( %a, + %b) + ret %out +} + +define @zip2_f16( %a, %b) { +; CHECK-LABEL: zip2_f16: +; CHECK: zip2 z0.h, z0.h, z1.h +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv8f16( %a, + %b) + ret %out +} + +define @zip2_f32( %a, %b) { +; CHECK-LABEL: zip2_f32: +; CHECK: zip2 z0.s, z0.s, z1.s +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv4f32( %a, + %b) + ret %out +} + +define @zip2_f64( %a, %b) { +; CHECK-LABEL: zip2_f64: +; CHECK: zip2 z0.d, z0.d, z1.d +; CHECK-NEXT: ret + %out = call @llvm.aarch64.sve.zip2.nxv2f64( %a, + %b) + ret %out +} + +declare @llvm.aarch64.sve.clasta.nxv16i8(, , ) +declare @llvm.aarch64.sve.clasta.nxv8i16(, , ) +declare @llvm.aarch64.sve.clasta.nxv4i32(, , ) +declare @llvm.aarch64.sve.clasta.nxv2i64(, , ) +declare @llvm.aarch64.sve.clasta.nxv8f16(, , ) +declare @llvm.aarch64.sve.clasta.nxv4f32(, , ) +declare @llvm.aarch64.sve.clasta.nxv2f64(, , ) + +declare i8 @llvm.aarch64.sve.clasta.n.nxv16i8(, i8, ) +declare i16 @llvm.aarch64.sve.clasta.n.nxv8i16(, i16, ) +declare i32 @llvm.aarch64.sve.clasta.n.nxv4i32(, i32, ) +declare i64 @llvm.aarch64.sve.clasta.n.nxv2i64(, i64, ) +declare half @llvm.aarch64.sve.clasta.n.nxv8f16(, half, ) +declare float @llvm.aarch64.sve.clasta.n.nxv4f32(, float, ) +declare double @llvm.aarch64.sve.clasta.n.nxv2f64(, double, ) + +declare @llvm.aarch64.sve.clastb.nxv16i8(, , ) +declare @llvm.aarch64.sve.clastb.nxv8i16(, , ) +declare @llvm.aarch64.sve.clastb.nxv4i32(, , ) +declare @llvm.aarch64.sve.clastb.nxv2i64(, , ) +declare @llvm.aarch64.sve.clastb.nxv8f16(, , ) +declare @llvm.aarch64.sve.clastb.nxv4f32(, , ) +declare @llvm.aarch64.sve.clastb.nxv2f64(, , ) + +declare i8 @llvm.aarch64.sve.clastb.n.nxv16i8(, i8, ) +declare i16 @llvm.aarch64.sve.clastb.n.nxv8i16(, i16, ) +declare i32 @llvm.aarch64.sve.clastb.n.nxv4i32(, i32, ) +declare i64 @llvm.aarch64.sve.clastb.n.nxv2i64(, i64, ) +declare half @llvm.aarch64.sve.clastb.n.nxv8f16(, half, ) +declare float @llvm.aarch64.sve.clastb.n.nxv4f32(, float, ) +declare double @llvm.aarch64.sve.clastb.n.nxv2f64(, double, ) + +declare @llvm.aarch64.sve.compact.nxv4i32(, ) +declare @llvm.aarch64.sve.compact.nxv2i64(, ) +declare @llvm.aarch64.sve.compact.nxv4f32(, ) +declare @llvm.aarch64.sve.compact.nxv2f64(, ) + +declare @llvm.aarch64.sve.ext.nxv16i8(, , i32) +declare @llvm.aarch64.sve.ext.nxv8i16(, , i32) +declare @llvm.aarch64.sve.ext.nxv4i32(, , i32) +declare @llvm.aarch64.sve.ext.nxv2i64(, , i32) +declare @llvm.aarch64.sve.ext.nxv8f16(, , i32) +declare @llvm.aarch64.sve.ext.nxv4f32(, , i32) +declare @llvm.aarch64.sve.ext.nxv2f64(, , i32) + +declare i8 @llvm.aarch64.sve.lasta.nxv16i8(, ) +declare i16 @llvm.aarch64.sve.lasta.nxv8i16(, ) +declare i32 @llvm.aarch64.sve.lasta.nxv4i32(, ) +declare i64 @llvm.aarch64.sve.lasta.nxv2i64(, ) +declare half @llvm.aarch64.sve.lasta.nxv8f16(, ) +declare float @llvm.aarch64.sve.lasta.nxv2f32(, ) +declare float @llvm.aarch64.sve.lasta.nxv4f32(, ) +declare double @llvm.aarch64.sve.lasta.nxv2f64(, ) + +declare i8 @llvm.aarch64.sve.lastb.nxv16i8(, ) +declare i16 @llvm.aarch64.sve.lastb.nxv8i16(, ) +declare i32 @llvm.aarch64.sve.lastb.nxv4i32(, ) +declare i64 @llvm.aarch64.sve.lastb.nxv2i64(, ) +declare half @llvm.aarch64.sve.lastb.nxv8f16(, ) +declare float @llvm.aarch64.sve.lastb.nxv2f32(, ) +declare float @llvm.aarch64.sve.lastb.nxv4f32(, ) +declare double @llvm.aarch64.sve.lastb.nxv2f64(, ) + +declare @llvm.aarch64.sve.rev.nxv16i1() +declare @llvm.aarch64.sve.rev.nxv8i1() +declare 
@llvm.aarch64.sve.rev.nxv4i1() +declare @llvm.aarch64.sve.rev.nxv2i1() +declare @llvm.aarch64.sve.rev.nxv16i8() +declare @llvm.aarch64.sve.rev.nxv8i16() +declare @llvm.aarch64.sve.rev.nxv4i32() +declare @llvm.aarch64.sve.rev.nxv2i64() +declare @llvm.aarch64.sve.rev.nxv8f16() +declare @llvm.aarch64.sve.rev.nxv4f32() +declare @llvm.aarch64.sve.rev.nxv2f64() + +declare @llvm.aarch64.sve.splice.nxv16i8(, , ) +declare @llvm.aarch64.sve.splice.nxv8i16(, , ) +declare @llvm.aarch64.sve.splice.nxv4i32(, , ) +declare @llvm.aarch64.sve.splice.nxv2i64(, , ) +declare @llvm.aarch64.sve.splice.nxv8f16(, , ) +declare @llvm.aarch64.sve.splice.nxv4f32(, , ) +declare @llvm.aarch64.sve.splice.nxv2f64(, , ) + declare @llvm.aarch64.sve.sunpkhi.nxv8i16() declare @llvm.aarch64.sve.sunpkhi.nxv4i32() declare @llvm.aarch64.sve.sunpkhi.nxv2i64() @@ -120,6 +1670,14 @@ declare @llvm.aarch64.sve.sunpklo.nxv4i32() declare @llvm.aarch64.sve.sunpklo.nxv2i64() +declare @llvm.aarch64.sve.tbl.nxv16i8(, ) +declare @llvm.aarch64.sve.tbl.nxv8i16(, ) +declare @llvm.aarch64.sve.tbl.nxv4i32(, ) +declare @llvm.aarch64.sve.tbl.nxv2i64(, ) +declare @llvm.aarch64.sve.tbl.nxv8f16(, ) +declare @llvm.aarch64.sve.tbl.nxv4f32(, ) +declare @llvm.aarch64.sve.tbl.nxv2f64(, ) + declare @llvm.aarch64.sve.uunpkhi.nxv8i16() declare @llvm.aarch64.sve.uunpkhi.nxv4i32() declare @llvm.aarch64.sve.uunpkhi.nxv2i64() @@ -127,3 +1685,81 @@ declare @llvm.aarch64.sve.uunpklo.nxv8i16() declare @llvm.aarch64.sve.uunpklo.nxv4i32() declare @llvm.aarch64.sve.uunpklo.nxv2i64() + +declare @llvm.aarch64.sve.trn1.nxv16i1(, ) +declare @llvm.aarch64.sve.trn1.nxv8i1(, ) +declare @llvm.aarch64.sve.trn1.nxv4i1(, ) +declare @llvm.aarch64.sve.trn1.nxv2i1(, ) +declare @llvm.aarch64.sve.trn1.nxv16i8(, ) +declare @llvm.aarch64.sve.trn1.nxv8i16(, ) +declare @llvm.aarch64.sve.trn1.nxv4i32(, ) +declare @llvm.aarch64.sve.trn1.nxv2i64(, ) +declare @llvm.aarch64.sve.trn1.nxv4f16(, ) +declare @llvm.aarch64.sve.trn1.nxv8f16(, ) +declare @llvm.aarch64.sve.trn1.nxv4f32(, ) +declare @llvm.aarch64.sve.trn1.nxv2f64(, ) + +declare @llvm.aarch64.sve.trn2.nxv16i1(, ) +declare @llvm.aarch64.sve.trn2.nxv8i1(, ) +declare @llvm.aarch64.sve.trn2.nxv4i1(, ) +declare @llvm.aarch64.sve.trn2.nxv2i1(, ) +declare @llvm.aarch64.sve.trn2.nxv16i8(, ) +declare @llvm.aarch64.sve.trn2.nxv8i16(, ) +declare @llvm.aarch64.sve.trn2.nxv4i32(, ) +declare @llvm.aarch64.sve.trn2.nxv2i64(, ) +declare @llvm.aarch64.sve.trn2.nxv4f16(, ) +declare @llvm.aarch64.sve.trn2.nxv8f16(, ) +declare @llvm.aarch64.sve.trn2.nxv4f32(, ) +declare @llvm.aarch64.sve.trn2.nxv2f64(, ) + +declare @llvm.aarch64.sve.uzp1.nxv16i1(, ) +declare @llvm.aarch64.sve.uzp1.nxv8i1(, ) +declare @llvm.aarch64.sve.uzp1.nxv4i1(, ) +declare @llvm.aarch64.sve.uzp1.nxv2i1(, ) +declare @llvm.aarch64.sve.uzp1.nxv16i8(, ) +declare @llvm.aarch64.sve.uzp1.nxv8i16(, ) +declare @llvm.aarch64.sve.uzp1.nxv4i32(, ) +declare @llvm.aarch64.sve.uzp1.nxv2i64(, ) +declare @llvm.aarch64.sve.uzp1.nxv4f16(, ) +declare @llvm.aarch64.sve.uzp1.nxv8f16(, ) +declare @llvm.aarch64.sve.uzp1.nxv4f32(, ) +declare @llvm.aarch64.sve.uzp1.nxv2f64(, ) + +declare @llvm.aarch64.sve.uzp2.nxv16i1(, ) +declare @llvm.aarch64.sve.uzp2.nxv8i1(, ) +declare @llvm.aarch64.sve.uzp2.nxv4i1(, ) +declare @llvm.aarch64.sve.uzp2.nxv2i1(, ) +declare @llvm.aarch64.sve.uzp2.nxv16i8(, ) +declare @llvm.aarch64.sve.uzp2.nxv8i16(, ) +declare @llvm.aarch64.sve.uzp2.nxv4i32(, ) +declare @llvm.aarch64.sve.uzp2.nxv2i64(, ) +declare @llvm.aarch64.sve.uzp2.nxv4f16(, ) +declare @llvm.aarch64.sve.uzp2.nxv8f16(, ) 
+declare @llvm.aarch64.sve.uzp2.nxv4f32(, ) +declare @llvm.aarch64.sve.uzp2.nxv2f64(, ) + +declare @llvm.aarch64.sve.zip1.nxv16i1(, ) +declare @llvm.aarch64.sve.zip1.nxv8i1(, ) +declare @llvm.aarch64.sve.zip1.nxv4i1(, ) +declare @llvm.aarch64.sve.zip1.nxv2i1(, ) +declare @llvm.aarch64.sve.zip1.nxv16i8(, ) +declare @llvm.aarch64.sve.zip1.nxv8i16(, ) +declare @llvm.aarch64.sve.zip1.nxv4i32(, ) +declare @llvm.aarch64.sve.zip1.nxv2i64(, ) +declare @llvm.aarch64.sve.zip1.nxv4f16(, ) +declare @llvm.aarch64.sve.zip1.nxv8f16(, ) +declare @llvm.aarch64.sve.zip1.nxv4f32(, ) +declare @llvm.aarch64.sve.zip1.nxv2f64(, ) + +declare @llvm.aarch64.sve.zip2.nxv16i1(, ) +declare @llvm.aarch64.sve.zip2.nxv8i1(, ) +declare @llvm.aarch64.sve.zip2.nxv4i1(, ) +declare @llvm.aarch64.sve.zip2.nxv2i1(, ) +declare @llvm.aarch64.sve.zip2.nxv16i8(, ) +declare @llvm.aarch64.sve.zip2.nxv8i16(, ) +declare @llvm.aarch64.sve.zip2.nxv4i32(, ) +declare @llvm.aarch64.sve.zip2.nxv2i64(, ) +declare @llvm.aarch64.sve.zip2.nxv4f16(, ) +declare @llvm.aarch64.sve.zip2.nxv8f16(, ) +declare @llvm.aarch64.sve.zip2.nxv4f32(, ) +declare @llvm.aarch64.sve.zip2.nxv2f64(, )
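
Note (editorial, not part of the patch): a short usage sketch illustrating the EXT lowering added in AArch64ISelLowering.cpp. The intrinsic takes an element index; LowerSVEIntrinsicEXT bitcasts both vector operands to bytes and multiplies the index by the element size, so an element index of 2 on <vscale x 4 x i32> operands becomes the byte immediate #8 (the ext_i32 test above shows index 1 becoming #4). The function name is illustrative.

define <vscale x 4 x i32> @example_ext(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b) {
  ; Expected codegen: ext z0.b, z0.b, z1.b, #8
  %out = call <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32> %a, <vscale x 4 x i32> %b, i32 2)
  ret <vscale x 4 x i32> %out
}

declare <vscale x 4 x i32> @llvm.aarch64.sve.ext.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i32>, i32)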