diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -15287,6 +15287,8 @@
 respectively). The vector intrinsics, such as ``llvm.bswap.v4i32``, operate on
 a per-element basis and the element order is not affected.
 
+.. _int_ctpop:
+
 '``llvm.ctpop.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -22162,6 +22164,53 @@
       %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
 
+.. _int_vp_ctpop:
+
+'``llvm.vp.ctpop.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32>  @llvm.vp.ctpop.v16i32 (<16 x i32> <op>, <16 x i1> <mask>, i32 <vector_length>)
+      declare <vscale x 4 x i32>  @llvm.vp.ctpop.nxv4i32 (<vscale x 4 x i32> <op>, <vscale x 4 x i1> <mask>, i32 <vector_length>)
+      declare <256 x i64>  @llvm.vp.ctpop.v256i64 (<256 x i64> <op>, <256 x i1> <mask>, i32 <vector_length>)
+
+Overview:
+"""""""""
+
+Predicated ctpop of a vector of integers.
+
+
+Arguments:
+""""""""""
+
+The first operand and the result have the same vector of integer type. The
+second operand is the vector mask and has the same number of elements as the
+result vector type. The third operand is the explicit vector length of the
+operation.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.ctpop``' intrinsic performs ctpop (:ref:`ctpop <int_ctpop>`) of the first operand on each
+enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.ctpop.v4i32(<4 x i32> %a)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
 .. _int_vp_fshl:
 
 '``llvm.vp.fshl.*``' Intrinsics
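[Editorial aside: as a cross-check on the select-based example above, here is a hedged C++ model of the per-lane rule. It is illustration only, not part of the patch; `kPoisonStandIn` is an arbitrary sentinel, since C++ has no poison value.]

```cpp
#include <bit>      // std::popcount (C++20)
#include <cstdint>
#include <vector>

// Per-lane model of llvm.vp.ctpop: a lane is enabled iff its mask bit is set
// and its index is below the explicit vector length (EVL); every disabled
// lane is poison, modeled here by an arbitrary sentinel value.
std::vector<uint32_t> vp_ctpop_model(const std::vector<uint32_t> &A,
                                     const std::vector<bool> &Mask,
                                     uint32_t EVL) {
  const uint32_t kPoisonStandIn = 0xDEADBEEF; // stand-in for poison
  std::vector<uint32_t> R(A.size(), kPoisonStandIn);
  for (size_t I = 0; I < A.size(); ++I)
    if (I < EVL && Mask[I])
      R[I] = static_cast<uint32_t>(std::popcount(A[I]));
  return R;
}
```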
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -4918,6 +4918,10 @@
   /// \returns The expansion result or SDValue() if it fails.
   SDValue expandCTPOP(SDNode *N, SelectionDAG &DAG) const;
 
+  /// Expand VP_CTPOP nodes.
+  /// \returns The expansion result or SDValue() if it fails.
+  SDValue expandVPCTPOP(SDNode *N, SelectionDAG &DAG) const;
+
   /// Expand CTLZ/CTLZ_ZERO_UNDEF nodes. Expands vector/scalar CTLZ nodes,
   /// vector nodes can only succeed if all operations are legal/custom.
   /// \param N Node to expand
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1566,6 +1566,10 @@
                              [ LLVMMatchType<0>,
                                LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                llvm_i32_ty]>;
+  def int_vp_ctpop : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                             [ LLVMMatchType<0>,
+                               LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                               llvm_i32_ty]>;
   def int_vp_fshl : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                              [ LLVMMatchType<0>,
                                LLVMMatchType<0>,
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -224,6 +224,10 @@
 BEGIN_REGISTER_VP(vp_bitreverse, 1, 2, VP_BITREVERSE, -1)
 END_REGISTER_VP(vp_bitreverse, VP_BITREVERSE)
 
+// llvm.vp.ctpop(x,mask,vlen)
+BEGIN_REGISTER_VP(vp_ctpop, 1, 2, VP_CTPOP, -1)
+END_REGISTER_VP(vp_ctpop, VP_CTPOP)
+
 // llvm.vp.fshl(x,y,z,mask,vlen)
 BEGIN_REGISTER_VP(vp_fshl, 3, 4, VP_FSHL, -1)
 END_REGISTER_VP(vp_fshl, VP_FSHL)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorOps.cpp
@@ -800,6 +800,12 @@
       return;
     }
     break;
+  case ISD::VP_CTPOP:
+    if (SDValue Expanded = TLI.expandVPCTPOP(Node, DAG)) {
+      Results.push_back(Expanded);
+      return;
+    }
+    break;
   case ISD::CTLZ:
   case ISD::CTLZ_ZERO_UNDEF:
     if (SDValue Expanded = TLI.expandCTLZ(Node, DAG)) {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1020,6 +1020,7 @@
   case ISD::CTLZ_ZERO_UNDEF:
   case ISD::CTTZ_ZERO_UNDEF:
   case ISD::CTPOP:
+  case ISD::VP_CTPOP:
   case ISD::FABS: case ISD::VP_FABS:
   case ISD::FCEIL: case ISD::VP_FCEIL:
@@ -4098,6 +4099,7 @@
   case ISD::CTLZ:
   case ISD::CTLZ_ZERO_UNDEF:
   case ISD::CTPOP:
+  case ISD::VP_CTPOP:
   case ISD::CTTZ:
   case ISD::CTTZ_ZERO_UNDEF:
   case ISD::FNEG: case ISD::VP_FNEG:
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -8229,6 +8229,63 @@
                      DAG.getConstant(Len - 8, dl, ShVT));
 }
 
+SDValue TargetLowering::expandVPCTPOP(SDNode *Node, SelectionDAG &DAG) const {
+  SDLoc dl(Node);
+  EVT VT = Node->getValueType(0);
+  EVT ShVT = getShiftAmountTy(VT, DAG.getDataLayout());
+  SDValue Op = Node->getOperand(0);
+  SDValue Mask = Node->getOperand(1);
+  SDValue VL = Node->getOperand(2);
+  unsigned Len = VT.getScalarSizeInBits();
+  assert(VT.isInteger() && "VP_CTPOP not implemented for this type.");
+
+  // TODO: Add support for irregular type lengths.
+  if (!(Len <= 128 && Len % 8 == 0))
+    return SDValue();
+
+  // This is the same algorithm as in expandCTPOP, from
+  // http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
+  SDValue Mask55 =
+      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x55)), dl, VT);
+  SDValue Mask33 =
+      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x33)), dl, VT);
+  SDValue Mask0F =
+      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x0F)), dl, VT);
+
+  SDValue Tmp1, Tmp2, Tmp3, Tmp4, Tmp5;
+
+  // v = v - ((v >> 1) & 0x55555555...)
+  Tmp1 = DAG.getNode(ISD::VP_AND, dl, VT,
+                     DAG.getNode(ISD::VP_LSHR, dl, VT, Op,
+                                 DAG.getConstant(1, dl, ShVT), Mask, VL),
+                     Mask55, Mask, VL);
+  Op = DAG.getNode(ISD::VP_SUB, dl, VT, Op, Tmp1, Mask, VL);
+
+  // v = (v & 0x33333333...) + ((v >> 2) & 0x33333333...)
+  Tmp2 = DAG.getNode(ISD::VP_AND, dl, VT, Op, Mask33, Mask, VL);
+  Tmp3 = DAG.getNode(ISD::VP_AND, dl, VT,
+                     DAG.getNode(ISD::VP_LSHR, dl, VT, Op,
+                                 DAG.getConstant(2, dl, ShVT), Mask, VL),
+                     Mask33, Mask, VL);
+  Op = DAG.getNode(ISD::VP_ADD, dl, VT, Tmp2, Tmp3, Mask, VL);
+
+  // v = (v + (v >> 4)) & 0x0F0F0F0F...
+  Tmp4 = DAG.getNode(ISD::VP_LSHR, dl, VT, Op, DAG.getConstant(4, dl, ShVT),
+                     Mask, VL);
+  Tmp5 = DAG.getNode(ISD::VP_ADD, dl, VT, Op, Tmp4, Mask, VL);
+  Op = DAG.getNode(ISD::VP_AND, dl, VT, Tmp5, Mask0F, Mask, VL);
+
+  if (Len <= 8)
+    return Op;
+
+  // v = (v * 0x01010101...) >> (Len - 8)
+  SDValue Mask01 =
+      DAG.getConstant(APInt::getSplat(Len, APInt(8, 0x01)), dl, VT);
+  return DAG.getNode(ISD::VP_LSHR, dl, VT,
+                     DAG.getNode(ISD::VP_MUL, dl, VT, Op, Mask01, Mask, VL),
+                     DAG.getConstant(Len - 8, dl, ShVT), Mask, VL);
+}
+
 SDValue TargetLowering::expandCTLZ(SDNode *Node, SelectionDAG &DAG) const {
   SDLoc dl(Node);
   EVT VT = Node->getValueType(0);
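[Editorial aside: expandVPCTPOP is the classic parallel bit-count from the linked bithacks page, with every step issued as a VP node carrying the same Mask and VL. A scalar C++ sketch of the identical steps for one 32-bit lane, illustration only, not part of the patch:]

```cpp
#include <cstdint>

// The sequence expandVPCTPOP emits, shown unpredicated for a single 32-bit
// lane (Len == 32, so the final shift amount is Len - 8 == 24).
uint32_t ctpop32_bithack(uint32_t V) {
  V = V - ((V >> 1) & 0x55555555u);                 // 2-bit partial counts
  V = (V & 0x33333333u) + ((V >> 2) & 0x33333333u); // 4-bit partial counts
  V = (V + (V >> 4)) & 0x0F0F0F0Fu;                 // 8-bit partial counts
  return (V * 0x01010101u) >> 24;                   // sum the four bytes
}
```

For i8 element types the expansion stops right after the 0x0F mask (`Len <= 8`), which is why those types need fewer instructions.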
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -615,6 +615,7 @@
       setOperationAction(ISD::BSWAP, VT, Expand);
       setOperationAction({ISD::VP_BSWAP, ISD::VP_BITREVERSE}, VT, Expand);
       setOperationAction({ISD::VP_FSHL, ISD::VP_FSHR}, VT, Expand);
+      setOperationAction(ISD::VP_CTPOP, VT, Expand);
 
       // Custom-lower extensions and truncations from/to mask types.
       setOperationAction({ISD::ANY_EXTEND, ISD::SIGN_EXTEND, ISD::ZERO_EXTEND},
diff --git a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
--- a/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVTargetTransformInfo.cpp
@@ -711,6 +711,41 @@
     {Intrinsic::ctpop, MVT::nxv2i64, 21},
     {Intrinsic::ctpop, MVT::nxv4i64, 21},
     {Intrinsic::ctpop, MVT::nxv8i64, 21},
+    {Intrinsic::vp_ctpop, MVT::v2i8, 12},
+    {Intrinsic::vp_ctpop, MVT::v4i8, 12},
+    {Intrinsic::vp_ctpop, MVT::v8i8, 12},
+    {Intrinsic::vp_ctpop, MVT::v16i8, 12},
+    {Intrinsic::vp_ctpop, MVT::nxv1i8, 12},
+    {Intrinsic::vp_ctpop, MVT::nxv2i8, 12},
+    {Intrinsic::vp_ctpop, MVT::nxv4i8, 12},
+    {Intrinsic::vp_ctpop, MVT::nxv8i8, 12},
+    {Intrinsic::vp_ctpop, MVT::nxv16i8, 12},
+    {Intrinsic::vp_ctpop, MVT::v2i16, 19},
+    {Intrinsic::vp_ctpop, MVT::v4i16, 19},
+    {Intrinsic::vp_ctpop, MVT::v8i16, 19},
+    {Intrinsic::vp_ctpop, MVT::v16i16, 19},
+    {Intrinsic::vp_ctpop, MVT::nxv1i16, 19},
+    {Intrinsic::vp_ctpop, MVT::nxv2i16, 19},
+    {Intrinsic::vp_ctpop, MVT::nxv4i16, 19},
+    {Intrinsic::vp_ctpop, MVT::nxv8i16, 19},
+    {Intrinsic::vp_ctpop, MVT::nxv16i16, 19},
+    {Intrinsic::vp_ctpop, MVT::v2i32, 20},
+    {Intrinsic::vp_ctpop, MVT::v4i32, 20},
+    {Intrinsic::vp_ctpop, MVT::v8i32, 20},
+    {Intrinsic::vp_ctpop, MVT::v16i32, 20},
+    {Intrinsic::vp_ctpop, MVT::nxv1i32, 20},
+    {Intrinsic::vp_ctpop, MVT::nxv2i32, 20},
+    {Intrinsic::vp_ctpop, MVT::nxv4i32, 20},
+    {Intrinsic::vp_ctpop, MVT::nxv8i32, 20},
+    {Intrinsic::vp_ctpop, MVT::nxv16i32, 20},
+    {Intrinsic::vp_ctpop, MVT::v2i64, 21},
+    {Intrinsic::vp_ctpop, MVT::v4i64, 21},
+    {Intrinsic::vp_ctpop, MVT::v8i64, 21},
+    {Intrinsic::vp_ctpop, MVT::v16i64, 21},
+    {Intrinsic::vp_ctpop, MVT::nxv1i64, 21},
+    {Intrinsic::vp_ctpop, MVT::nxv2i64, 21},
+    {Intrinsic::vp_ctpop, MVT::nxv4i64, 21},
+    {Intrinsic::vp_ctpop, MVT::nxv8i64, 21},
 };
 
 static unsigned getISDForVPIntrinsicID(Intrinsic::ID ID) {
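[Editorial aside: the new cost figures track the expansion above. i8 element types cost 12 because the final multiply-and-shift is skipped for `Len <= 8`, rising to 21 for i64, and the cost-model test below charges `<vscale x 16 x i64>` at 42, i.e. two `<vscale x 8 x i64>` halves after type legalization splits it. A hedged sketch of querying one of these entries through the generic TTI interface; the helper name and setup are illustrative, not from the patch:]

```cpp
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"

using namespace llvm;

// Illustrative only: ask TTI for the cost of a <vscale x 4 x i32>
// llvm.vp.ctpop, which should be served by the {vp_ctpop, nxv4i32, 20} entry.
InstructionCost getVPCtpopCost(const TargetTransformInfo &TTI,
                               LLVMContext &Ctx) {
  auto *VecTy = ScalableVectorType::get(Type::getInt32Ty(Ctx), 4);
  auto *MaskTy = ScalableVectorType::get(Type::getInt1Ty(Ctx), 4);
  Type *ArgTys[] = {VecTy, MaskTy, Type::getInt32Ty(Ctx)};
  IntrinsicCostAttributes ICA(Intrinsic::vp_ctpop, VecTy, ArgTys);
  return TTI.getIntrinsicInstrCost(ICA,
                                   TargetTransformInfo::TCK_RecipThroughput);
}
```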
diff --git a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
--- a/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
+++ b/llvm/test/Analysis/CostModel/RISCV/int-bit-manip.ll
@@ -303,6 +303,85 @@
   ret void
 }
 
+define void @vp_ctpop() {
+; CHECK-LABEL: 'vp_ctpop'
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %1 = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %2 = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %3 = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %4 = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %5 = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %6 = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %7 = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %8 = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %9 = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %10 = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %11 = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %12 = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %13 = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %14 = call <vscale x 1 x i16> @llvm.vp.ctpop.nxv1i16(<vscale x 1 x i16> undef, <vscale x 1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %15 = call <vscale x 2 x i16> @llvm.vp.ctpop.nxv2i16(<vscale x 2 x i16> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %16 = call <vscale x 4 x i16> @llvm.vp.ctpop.nxv4i16(<vscale x 4 x i16> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %17 = call <vscale x 8 x i16> @llvm.vp.ctpop.nxv8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 19 for instruction: %18 = call <vscale x 16 x i16> @llvm.vp.ctpop.nxv16i16(<vscale x 16 x i16> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %19 = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %20 = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %21 = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %22 = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %23 = call <vscale x 1 x i32> @llvm.vp.ctpop.nxv1i32(<vscale x 1 x i32> undef, <vscale x 1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %24 = call <vscale x 2 x i32> @llvm.vp.ctpop.nxv2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %25 = call <vscale x 4 x i32> @llvm.vp.ctpop.nxv4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %26 = call <vscale x 8 x i32> @llvm.vp.ctpop.nxv8i32(<vscale x 8 x i32> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 20 for instruction: %27 = call <vscale x 16 x i32> @llvm.vp.ctpop.nxv16i32(<vscale x 16 x i32> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %28 = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %29 = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %30 = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %31 = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %32 = call <vscale x 1 x i64> @llvm.vp.ctpop.nxv1i64(<vscale x 1 x i64> undef, <vscale x 1 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %33 = call <vscale x 2 x i64> @llvm.vp.ctpop.nxv2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %34 = call <vscale x 4 x i64> @llvm.vp.ctpop.nxv4i64(<vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 21 for instruction: %35 = call <vscale x 8 x i64> @llvm.vp.ctpop.nxv8i64(<vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 42 for instruction: %36 = call <vscale x 16 x i64> @llvm.vp.ctpop.nxv16i64(<vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+; CHECK-NEXT: Cost Model: Found an estimated cost of 1 for instruction: ret void
+;
+  call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef)
+  call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef)
+  call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef)
+  call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef)
+  call <vscale x 1 x i16> @llvm.vp.ctpop.nvx1i16(<vscale x 1 x i16> undef, <vscale x 1 x i1> undef, i32 undef)
+  call <vscale x 2 x i16> @llvm.vp.ctpop.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 4 x i16> @llvm.vp.ctpop.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 8 x i16> @llvm.vp.ctpop.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 16 x i16> @llvm.vp.ctpop.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i1> undef, i32 undef)
+  call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> undef, <2 x i1> undef, i32 undef)
+  call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> undef, <4 x i1> undef, i32 undef)
+  call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> undef, <8 x i1> undef, i32 undef)
+  call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> undef, <16 x i1> undef, i32 undef)
+  call <vscale x 1 x i16> @llvm.vp.ctpop.nvx1i16(<vscale x 1 x i16> undef, <vscale x 1 x i1> undef, i32 undef)
+  call <vscale x 2 x i16> @llvm.vp.ctpop.nvx2i16(<vscale x 2 x i16> undef, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 4 x i16> @llvm.vp.ctpop.nvx4i16(<vscale x 4 x i16> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 8 x i16> @llvm.vp.ctpop.nvx8i16(<vscale x 8 x i16> undef, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 16 x i16> @llvm.vp.ctpop.nvx16i16(<vscale x 16 x i16> undef, <vscale x 16 x i1> undef, i32 undef)
+  call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> undef, <2 x i1> undef, i32 undef)
+  call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> undef, <4 x i1> undef, i32 undef)
+  call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> undef, <8 x i1> undef, i32 undef)
+  call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> undef, <16 x i1> undef, i32 undef)
+  call <vscale x 1 x i32> @llvm.vp.ctpop.nvx1i32(<vscale x 1 x i32> undef, <vscale x 1 x i1> undef, i32 undef)
+  call <vscale x 2 x i32> @llvm.vp.ctpop.nvx2i32(<vscale x 2 x i32> undef, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 4 x i32> @llvm.vp.ctpop.nvx4i32(<vscale x 4 x i32> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 8 x i32> @llvm.vp.ctpop.nvx8i32(<vscale x 8 x i32> undef, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 16 x i32> @llvm.vp.ctpop.nvx16i32(<vscale x 16 x i32> undef, <vscale x 16 x i1> undef, i32 undef)
+  call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> undef, <2 x i1> undef, i32 undef)
+  call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> undef, <4 x i1> undef, i32 undef)
+  call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> undef, <8 x i1> undef, i32 undef)
+  call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> undef, <16 x i1> undef, i32 undef)
+  call <vscale x 1 x i64> @llvm.vp.ctpop.nvx1i64(<vscale x 1 x i64> undef, <vscale x 1 x i1> undef, i32 undef)
+  call <vscale x 2 x i64> @llvm.vp.ctpop.nvx2i64(<vscale x 2 x i64> undef, <vscale x 2 x i1> undef, i32 undef)
+  call <vscale x 4 x i64> @llvm.vp.ctpop.nvx4i64(<vscale x 4 x i64> undef, <vscale x 4 x i1> undef, i32 undef)
+  call <vscale x 8 x i64> @llvm.vp.ctpop.nvx8i64(<vscale x 8 x i64> undef, <vscale x 8 x i1> undef, i32 undef)
+  call <vscale x 16 x i64> @llvm.vp.ctpop.nvx16i64(<vscale x 16 x i64> undef, <vscale x 16 x i1> undef, i32 undef)
+  ret void
+}
+
 declare i16 @llvm.bswap.i16(i16)
 declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
 declare <4 x i16> @llvm.bswap.v4i16(<4 x i16>)
@@ -443,3 +522,40 @@
 declare <vscale x 4 x i64> @llvm.vp.bswap.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
 declare <vscale x 8 x i64> @llvm.vp.bswap.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
 declare <vscale x 16 x i64> @llvm.vp.bswap.nvx16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
+
+declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32)
+declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32)
+declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32)
+declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32)
+declare <vscale x 1 x i8> @llvm.vp.ctpop.nvx1i8(<vscale x 1 x i8>, <vscale x 1 x i1>, i32)
+declare <vscale x 2 x i8> @llvm.vp.ctpop.nvx2i8(<vscale x 2 x i8>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i8> @llvm.vp.ctpop.nvx4i8(<vscale x 4 x i8>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i8> @llvm.vp.ctpop.nvx8i8(<vscale x 8 x i8>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i8> @llvm.vp.ctpop.nvx16i8(<vscale x 16 x i8>, <vscale x 16 x i1>, i32)
+declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32)
+declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32)
+declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32)
+declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32)
+declare <vscale x 1 x i16> @llvm.vp.ctpop.nvx1i16(<vscale x 1 x i16>, <vscale x 1 x i1>, i32)
+declare <vscale x 2 x i16> @llvm.vp.ctpop.nvx2i16(<vscale x 2 x i16>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i16> @llvm.vp.ctpop.nvx4i16(<vscale x 4 x i16>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i16> @llvm.vp.ctpop.nvx8i16(<vscale x 8 x i16>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i16> @llvm.vp.ctpop.nvx16i16(<vscale x 16 x i16>, <vscale x 16 x i1>, i32)
+declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32)
+declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32)
+declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32)
+declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32)
+declare <vscale x 1 x i32> @llvm.vp.ctpop.nvx1i32(<vscale x 1 x i32>, <vscale x 1 x i1>, i32)
+declare <vscale x 2 x i32> @llvm.vp.ctpop.nvx2i32(<vscale x 2 x i32>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i32> @llvm.vp.ctpop.nvx4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i32> @llvm.vp.ctpop.nvx8i32(<vscale x 8 x i32>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i32> @llvm.vp.ctpop.nvx16i32(<vscale x 16 x i32>, <vscale x 16 x i1>, i32)
+declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32)
+declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32)
+declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32)
+declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
+declare <vscale x 1 x i64> @llvm.vp.ctpop.nvx1i64(<vscale x 1 x i64>, <vscale x 1 x i1>, i32)
+declare <vscale x 2 x i64> @llvm.vp.ctpop.nvx2i64(<vscale x 2 x i64>, <vscale x 2 x i1>, i32)
+declare <vscale x 4 x i64> @llvm.vp.ctpop.nvx4i64(<vscale x 4 x i64>, <vscale x 4 x i1>, i32)
+declare <vscale x 8 x i64> @llvm.vp.ctpop.nvx8i64(<vscale x 8 x i64>, <vscale x 8 x i1>, i32)
+declare <vscale x 16 x i64> @llvm.vp.ctpop.nvx16i64(<vscale x 16 x i64>, <vscale x 16 x i1>, i32)
diff --git a/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/ctpop-vp.ll
@@ -0,0 +1,2793 @@
+; NOTE: Assertions have
been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare @llvm.vp.ctpop.nxv1i8(, , i32) + +define @vp_ctpop_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv1i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv1i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv1i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv1i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv2i8(, , i32) + +define @vp_ctpop_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv2i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv2i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv4i8(, , i32) + +define @vp_ctpop_nxv4i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: 
vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv4i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv4i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv4i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv8i8(, , i32) + +define @vp_ctpop_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv8i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv8i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv16i8(, , i32) + +define @vp_ctpop_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v10, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v10, v8, v0.t +; CHECK-NEXT: vsrl.vi v10, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v10, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv16i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: 
vp_ctpop_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vsrl.vi v10, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v10, v10, a0 +; CHECK-NEXT: vsub.vv v8, v8, v10 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v10, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v10, v8 +; CHECK-NEXT: vsrl.vi v10, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v10 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv16i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv32i8(, , i32) + +define @vp_ctpop_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v12, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v12, v8, v0.t +; CHECK-NEXT: vsrl.vi v12, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v12, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv32i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vsrl.vi v12, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v12, v12, a0 +; CHECK-NEXT: vsub.vv v8, v8, v12 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v12, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v12, v8 +; CHECK-NEXT: vsrl.vi v12, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v12 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv32i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv64i8(, , i32) + +define @vp_ctpop_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v16, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v16, v8, v0.t +; CHECK-NEXT: vsrl.vi v16, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v16, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.ctpop.nxv64i8( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vsrl.vi v16, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v16, v16, a0 +; CHECK-NEXT: vsub.vv v8, v8, v16 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v16, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v16, v8 +; CHECK-NEXT: vsrl.vi v16, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v16 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = 
shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv64i8( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv1i16(, , i32) + +define @vp_ctpop_nxv1i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv1i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv1i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv2i16(, , i32) + +define 
@vp_ctpop_nxv2i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv2i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv2i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv4i16(, , i32) + +define @vp_ctpop_nxv4i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv4i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, 
v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv4i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv4i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv4i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv4i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv8i16(, , i32) + +define @vp_ctpop_nxv8i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 3 +; 
RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv8i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv8i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv16i16(, , i32) + +define @vp_ctpop_nxv16i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t 
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv16i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv16i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv32i16(, , i32) + +define @vp_ctpop_nxv32i16( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv32i16: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: lui 
a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv32i16: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv32i16( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv32i16_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 5 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 3 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 1 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: li a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 8 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv32i16_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 5 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 3 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 1 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: li a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 8 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv32i16( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv1i32(, , i32) + +define @vp_ctpop_nxv1i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv1i32( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv1i32( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv2i32(, , i32) + +define @vp_ctpop_nxv2i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v9, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: 
addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv2i32( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v9, v9, a0 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v9, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v9, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v9, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv2i32( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv4i32(, , i32) + +define @vp_ctpop_nxv4i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv4i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui 
a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv4i32( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv4i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v10, v10, a0 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v10, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v10, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv4i32( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv8i32(, , i32) + +define @vp_ctpop_nxv8i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; 
RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv8i32( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v12, v12, a0 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v12, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v12, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv8i32( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv16i32(, , i32) + +define @vp_ctpop_nxv16i32( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i32: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: 
lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0, v0.t +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i32: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv16i32( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i32_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: lui a0, 349525 +; RV32-NEXT: addi a0, a0, 1365 +; RV32-NEXT: vand.vx v16, v16, a0 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: lui a0, 209715 +; RV32-NEXT: addi a0, a0, 819 +; RV32-NEXT: vand.vx v16, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: lui a0, 61681 +; RV32-NEXT: addi a0, a0, -241 +; RV32-NEXT: vand.vx v8, v8, a0 +; RV32-NEXT: lui a0, 4112 +; RV32-NEXT: addi a0, a0, 257 +; RV32-NEXT: vmul.vx v8, v8, a0 +; RV32-NEXT: vsrl.vi v8, v8, 24 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i32_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: lui a0, 349525 +; RV64-NEXT: addiw a0, a0, 1365 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: lui a0, 209715 +; RV64-NEXT: addiw a0, a0, 819 +; RV64-NEXT: vand.vx v16, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: lui a0, 61681 +; RV64-NEXT: addiw a0, a0, -241 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: lui a0, 4112 +; RV64-NEXT: addiw a0, a0, 257 +; RV64-NEXT: vmul.vx v8, v8, a0 +; RV64-NEXT: vsrl.vi v8, v8, 24 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv16i32( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv1i64(, , i32) + +define @vp_ctpop_nxv1i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: 
sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10, v0.t +; RV32-NEXT: vsub.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vadd.vv v8, v10, v8, v0.t +; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI36_0) +; RV64-NEXT: ld a0, %lo(.LCPI36_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_1) +; RV64-NEXT: ld a1, %lo(.LCPI36_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t +; RV64-NEXT: vand.vx v9, v9, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v9, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v9, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI36_2) +; RV64-NEXT: ld a0, %lo(.LCPI36_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI36_3) +; RV64-NEXT: ld a1, %lo(.LCPI36_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v9, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv1i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv1i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv1i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vsrl.vi v9, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v9, v9, v10 +; RV32-NEXT: vsub.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v10, v8, v9 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vadd.vv v8, v10, v8 +; RV32-NEXT: vsrl.vi v9, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), 
zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vand.vv v8, v8, v9 +; RV32-NEXT: vsetvli a2, zero, e64, m1, ta, ma +; RV32-NEXT: vlse64.v v9, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v9 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv1i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI37_0) +; RV64-NEXT: ld a0, %lo(.LCPI37_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_1) +; RV64-NEXT: ld a1, %lo(.LCPI37_1)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 1 +; RV64-NEXT: vand.vx v9, v9, a0 +; RV64-NEXT: vsub.vv v8, v8, v9 +; RV64-NEXT: vand.vx v9, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v9, v8 +; RV64-NEXT: lui a0, %hi(.LCPI37_2) +; RV64-NEXT: ld a0, %lo(.LCPI37_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI37_3) +; RV64-NEXT: ld a1, %lo(.LCPI37_3)(a1) +; RV64-NEXT: vsrl.vi v9, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v9 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv1i64( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv2i64(, , i32) + +define @vp_ctpop_nxv2i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12, v0.t +; RV32-NEXT: vsub.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vadd.vv v8, v12, v8, v0.t +; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI38_0) +; RV64-NEXT: ld a0, %lo(.LCPI38_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI38_1) +; RV64-NEXT: ld a1, %lo(.LCPI38_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t +; RV64-NEXT: vand.vx v10, v10, a0, 
v0.t +; RV64-NEXT: vsub.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v10, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v10, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI38_2) +; RV64-NEXT: ld a0, %lo(.LCPI38_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI38_3) +; RV64-NEXT: ld a1, %lo(.LCPI38_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v10, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv2i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv2i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vsrl.vi v10, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v10, v10, v12 +; RV32-NEXT: vsub.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v12, v8, v10 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vadd.vv v8, v12, v8 +; RV32-NEXT: vsrl.vi v10, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vand.vv v8, v8, v10 +; RV32-NEXT: vsetvli a2, zero, e64, m2, ta, ma +; RV32-NEXT: vlse64.v v10, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v10 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv2i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI39_0) +; RV64-NEXT: ld a0, %lo(.LCPI39_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_1) +; RV64-NEXT: ld a1, %lo(.LCPI39_1)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 1 +; RV64-NEXT: vand.vx v10, v10, a0 +; RV64-NEXT: vsub.vv v8, v8, v10 +; RV64-NEXT: vand.vx v10, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v10, v8 +; RV64-NEXT: lui a0, %hi(.LCPI39_2) +; RV64-NEXT: ld a0, %lo(.LCPI39_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI39_3) +; RV64-NEXT: ld a1, %lo(.LCPI39_3)(a1) +; RV64-NEXT: vsrl.vi v10, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v10 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv2i64( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv4i64(, , i32) + +define @vp_ctpop_nxv4i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: 
vp_ctpop_nxv4i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vadd.vv v8, v16, v8, v0.t +; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI40_0) +; RV64-NEXT: ld a0, %lo(.LCPI40_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_1) +; RV64-NEXT: ld a1, %lo(.LCPI40_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t +; RV64-NEXT: vand.vx v12, v12, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v12, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v12, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI40_2) +; RV64-NEXT: ld a0, %lo(.LCPI40_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI40_3) +; RV64-NEXT: ld a1, %lo(.LCPI40_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v12, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv4i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv4i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vsrl.vi v12, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v 
v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v12, v12, v16 +; RV32-NEXT: vsub.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v16, v8, v12 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v12, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vand.vv v8, v8, v12 +; RV32-NEXT: vsetvli a2, zero, e64, m4, ta, ma +; RV32-NEXT: vlse64.v v12, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v12 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv4i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI41_0) +; RV64-NEXT: ld a0, %lo(.LCPI41_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_1) +; RV64-NEXT: ld a1, %lo(.LCPI41_1)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 1 +; RV64-NEXT: vand.vx v12, v12, a0 +; RV64-NEXT: vsub.vv v8, v8, v12 +; RV64-NEXT: vand.vx v12, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v12, v8 +; RV64-NEXT: lui a0, %hi(.LCPI41_2) +; RV64-NEXT: ld a0, %lo(.LCPI41_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI41_3) +; RV64-NEXT: ld a1, %lo(.LCPI41_3)(a1) +; RV64-NEXT: vsrl.vi v12, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v12 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv4i64( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv7i64(, , i32) + +define @vp_ctpop_nxv7i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv7i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; 
RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv7i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI42_0) +; RV64-NEXT: ld a0, %lo(.LCPI42_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_1) +; RV64-NEXT: ld a1, %lo(.LCPI42_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI42_2) +; RV64-NEXT: ld a0, %lo(.LCPI42_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI42_3) +; RV64-NEXT: ld a1, %lo(.LCPI42_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv7i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv7i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv7i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI43_0) +; RV64-NEXT: ld a0, %lo(.LCPI43_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI43_1) +; RV64-NEXT: ld a1, %lo(.LCPI43_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI43_2) +; RV64-NEXT: ld a0, %lo(.LCPI43_2)(a0) +; 
RV64-NEXT: lui a1, %hi(.LCPI43_3) +; RV64-NEXT: ld a1, %lo(.LCPI43_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv7i64( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv8i64(, , i32) + +define @vp_ctpop_nxv8i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI44_0) +; RV64-NEXT: ld a0, %lo(.LCPI44_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_1) +; RV64-NEXT: ld a1, %lo(.LCPI44_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t +; RV64-NEXT: vand.vx v16, v16, a0, v0.t +; RV64-NEXT: vsub.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v16, v8, a1, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a1, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: lui a0, %hi(.LCPI44_2) +; RV64-NEXT: ld a0, %lo(.LCPI44_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI44_3) +; RV64-NEXT: ld a1, %lo(.LCPI44_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a0, v0.t +; RV64-NEXT: vmul.vx v8, v8, a1, v0.t +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv8i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv8i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw 
a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: addi a1, sp, 8 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v8, v16 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vadd.vv v8, v24, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: vsetvli a2, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a1), zero +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: li a0, 56 +; RV32-NEXT: vsrl.vx v8, v8, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv8i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: lui a0, %hi(.LCPI45_0) +; RV64-NEXT: ld a0, %lo(.LCPI45_0)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_1) +; RV64-NEXT: ld a1, %lo(.LCPI45_1)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 1 +; RV64-NEXT: vand.vx v16, v16, a0 +; RV64-NEXT: vsub.vv v8, v8, v16 +; RV64-NEXT: vand.vx v16, v8, a1 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a1 +; RV64-NEXT: vadd.vv v8, v16, v8 +; RV64-NEXT: lui a0, %hi(.LCPI45_2) +; RV64-NEXT: ld a0, %lo(.LCPI45_2)(a0) +; RV64-NEXT: lui a1, %hi(.LCPI45_3) +; RV64-NEXT: ld a1, %lo(.LCPI45_3)(a1) +; RV64-NEXT: vsrl.vi v16, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v16 +; RV64-NEXT: vand.vx v8, v8, a0 +; RV64-NEXT: vmul.vx v8, v8, a1 +; RV64-NEXT: li a0, 56 +; RV64-NEXT: vsrl.vx v8, v8, a0 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.ctpop.nxv8i64( %va, %m, i32 %evl) + ret %v +} + +declare @llvm.vp.ctpop.nxv16i64(, , i32) + +define @vp_ctpop_nxv16i64( %va, %m, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 48 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x30, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 48 * vlenb +; RV32-NEXT: vmv1r.v v1, v0 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: li a2, 40 +; RV32-NEXT: mul a1, a1, a2 +; RV32-NEXT: add a1, sp, a1 +; RV32-NEXT: addi a1, a1, 16 +; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: srli a2, a1, 3 +; RV32-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV32-NEXT: vslidedown.vx v0, v0, a2 +; RV32-NEXT: lui a2, 349525 +; RV32-NEXT: addi a2, a2, 1365 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 209715 +; RV32-NEXT: addi a2, a2, 819 +; RV32-NEXT: sw a2, 12(sp) 
+; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 61681 +; RV32-NEXT: addi a2, a2, -241 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: lui a2, 4112 +; RV32-NEXT: addi a2, a2, 257 +; RV32-NEXT: sw a2, 12(sp) +; RV32-NEXT: sw a2, 8(sp) +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1, v0.t +; RV32-NEXT: addi a3, sp, 8 +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v16, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsub.vv v16, v16, v24, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v24, (a4) # Unknown-size Folded Spill +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v24, v24, 2, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 40 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vl8r.v v24, (a4) # Unknown-size Folded Reload +; RV32-NEXT: vadd.vv v16, v24, v16, v0.t +; RV32-NEXT: vsrl.vi v24, v16, 4, v0.t +; RV32-NEXT: vadd.vv v24, v16, v24, v0.t +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v16, v0.t +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a3), zero +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 4 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v24, v16, v0.t +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v8, v16, a2, v0.t 
+; RV32-NEXT: csrr a3, vlenb +; RV32-NEXT: slli a3, a3, 3 +; RV32-NEXT: add a3, sp, a3 +; RV32-NEXT: addi a3, a3, 16 +; RV32-NEXT: vs8r.v v8, (a3) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB46_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB46_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vmv1r.v v0, v1 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vsrl.vi v24, v8, 1, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v24, v16, v0.t +; RV32-NEXT: vsub.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 40 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v24, v8, v16, v0.t +; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: vadd.vv v8, v24, v8, v0.t +; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV32-NEXT: vadd.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16, v0.t +; RV32-NEXT: vsrl.vx v8, v8, a2, v0.t +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 48 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i64: +; RV64: # %bb.0: +; RV64-NEXT: addi sp, sp, -16 +; RV64-NEXT: .cfi_def_cfa_offset 16 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 4 +; RV64-NEXT: sub sp, sp, a1 +; RV64-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; RV64-NEXT: vmv1r.v v24, v0 +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: slli a1, a1, 3 +; RV64-NEXT: add a1, sp, a1 +; RV64-NEXT: addi a1, a1, 16 +; RV64-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: srli a2, a1, 3 +; RV64-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; RV64-NEXT: vslidedown.vx v0, v0, a2 +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: lui a2, %hi(.LCPI46_0) +; RV64-NEXT: ld a3, %lo(.LCPI46_0)(a2) +; RV64-NEXT: lui a2, %hi(.LCPI46_1) +; RV64-NEXT: ld a2, %lo(.LCPI46_1)(a2) +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v16, v16, v8, v0.t +; RV64-NEXT: lui a4, %hi(.LCPI46_2) +; RV64-NEXT: ld a4, %lo(.LCPI46_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI46_3) +; RV64-NEXT: ld a5, %lo(.LCPI46_3)(a5) +; RV64-NEXT: vsrl.vi v8, v16, 4, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; 
RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a7, sp, 16 +; RV64-NEXT: vs8r.v v8, (a7) # Unknown-size Folded Spill +; RV64-NEXT: bltu a0, a1, .LBB46_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB46_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vmv1r.v v0, v24 +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 3 +; RV64-NEXT: add a0, sp, a0 +; RV64-NEXT: addi a0, a0, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: vsrl.vi v8, v16, 1, v0.t +; RV64-NEXT: vand.vx v8, v8, a3, v0.t +; RV64-NEXT: vsub.vv v8, v16, v8, v0.t +; RV64-NEXT: vand.vx v16, v8, a2, v0.t +; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t +; RV64-NEXT: vand.vx v8, v8, a2, v0.t +; RV64-NEXT: vadd.vv v8, v16, v8, v0.t +; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t +; RV64-NEXT: vadd.vv v8, v8, v16, v0.t +; RV64-NEXT: vand.vx v8, v8, a4, v0.t +; RV64-NEXT: vmul.vx v8, v8, a5, v0.t +; RV64-NEXT: vsrl.vx v8, v8, a6, v0.t +; RV64-NEXT: addi a0, sp, 16 +; RV64-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV64-NEXT: csrr a0, vlenb +; RV64-NEXT: slli a0, a0, 4 +; RV64-NEXT: add sp, sp, a0 +; RV64-NEXT: addi sp, sp, 16 +; RV64-NEXT: ret + %v = call @llvm.vp.ctpop.nxv16i64( %va, %m, i32 %evl) + ret %v +} + +define @vp_ctpop_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; RV32-LABEL: vp_ctpop_nxv16i64_unmasked: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: slli a1, a1, 5 +; RV32-NEXT: sub sp, sp, a1 +; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x20, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 32 * vlenb +; RV32-NEXT: lui a1, 349525 +; RV32-NEXT: addi a1, a1, 1365 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 209715 +; RV32-NEXT: addi a1, a1, 819 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 61681 +; RV32-NEXT: addi a1, a1, -241 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: lui a1, 4112 +; RV32-NEXT: addi a1, a1, 257 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a1, 8(sp) +; RV32-NEXT: csrr a1, vlenb +; RV32-NEXT: sub a2, a0, a1 +; RV32-NEXT: sltu a3, a0, a2 +; RV32-NEXT: addi a3, a3, -1 +; RV32-NEXT: and a2, a3, a2 +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v24, v16, 1 +; RV32-NEXT: addi a3, sp, 8 +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: li a5, 24 +; RV32-NEXT: mul a4, a4, a5 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v0, (a4) # Unknown-size Folded Spill +; RV32-NEXT: vand.vv v24, v24, v0 +; RV32-NEXT: vsub.vv v16, v16, v24 +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v0, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vand.vv v24, v16, v0 +; RV32-NEXT: vsrl.vi v16, v16, 2 +; RV32-NEXT: vand.vv v16, v16, v0 +; RV32-NEXT: vadd.vv v16, v24, v16 +; RV32-NEXT: vsrl.vi v24, v16, 4 +; RV32-NEXT: vadd.vv v24, v16, v24 +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v16, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a4, vlenb +; RV32-NEXT: slli a4, a4, 4 +; RV32-NEXT: add a4, sp, a4 +; RV32-NEXT: addi a4, a4, 16 +; RV32-NEXT: vs8r.v v16, (a4) # Unknown-size Folded Spill +; 
RV32-NEXT: vand.vv v16, v24, v16 +; RV32-NEXT: vsetvli a4, zero, e64, m8, ta, ma +; RV32-NEXT: vlse64.v v24, (a3), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: csrr a2, vlenb +; RV32-NEXT: slli a2, a2, 3 +; RV32-NEXT: add a2, sp, a2 +; RV32-NEXT: addi a2, a2, 16 +; RV32-NEXT: vs8r.v v24, (a2) # Unknown-size Folded Spill +; RV32-NEXT: vmul.vv v16, v16, v24 +; RV32-NEXT: li a2, 56 +; RV32-NEXT: vsrl.vx v16, v16, a2 +; RV32-NEXT: addi a3, sp, 16 +; RV32-NEXT: vs8r.v v16, (a3) # Unknown-size Folded Spill +; RV32-NEXT: bltu a0, a1, .LBB47_2 +; RV32-NEXT: # %bb.1: +; RV32-NEXT: mv a0, a1 +; RV32-NEXT: .LBB47_2: +; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV32-NEXT: vsrl.vi v16, v8, 1 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: li a1, 24 +; RV32-NEXT: mul a0, a0, a1 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v24, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v16, v16, v24 +; RV32-NEXT: vsub.vv v8, v8, v16 +; RV32-NEXT: vand.vv v16, v8, v0 +; RV32-NEXT: vsrl.vi v8, v8, 2 +; RV32-NEXT: vand.vv v8, v8, v0 +; RV32-NEXT: vadd.vv v8, v16, v8 +; RV32-NEXT: vsrl.vi v16, v8, 4 +; RV32-NEXT: vadd.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 4 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vand.vv v8, v8, v16 +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 3 +; RV32-NEXT: add a0, sp, a0 +; RV32-NEXT: addi a0, a0, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: vmul.vv v8, v8, v16 +; RV32-NEXT: vsrl.vx v8, v8, a2 +; RV32-NEXT: addi a0, sp, 16 +; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; RV32-NEXT: csrr a0, vlenb +; RV32-NEXT: slli a0, a0, 5 +; RV32-NEXT: add sp, sp, a0 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: vp_ctpop_nxv16i64_unmasked: +; RV64: # %bb.0: +; RV64-NEXT: csrr a1, vlenb +; RV64-NEXT: sub a2, a0, a1 +; RV64-NEXT: sltu a3, a0, a2 +; RV64-NEXT: addi a3, a3, -1 +; RV64-NEXT: and a2, a3, a2 +; RV64-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV64-NEXT: lui a2, %hi(.LCPI47_0) +; RV64-NEXT: ld a2, %lo(.LCPI47_0)(a2) +; RV64-NEXT: lui a3, %hi(.LCPI47_1) +; RV64-NEXT: ld a3, %lo(.LCPI47_1)(a3) +; RV64-NEXT: vsrl.vi v24, v16, 1 +; RV64-NEXT: vand.vx v24, v24, a2 +; RV64-NEXT: vsub.vv v16, v16, v24 +; RV64-NEXT: vand.vx v24, v16, a3 +; RV64-NEXT: vsrl.vi v16, v16, 2 +; RV64-NEXT: vand.vx v16, v16, a3 +; RV64-NEXT: vadd.vv v16, v24, v16 +; RV64-NEXT: lui a4, %hi(.LCPI47_2) +; RV64-NEXT: ld a4, %lo(.LCPI47_2)(a4) +; RV64-NEXT: lui a5, %hi(.LCPI47_3) +; RV64-NEXT: ld a5, %lo(.LCPI47_3)(a5) +; RV64-NEXT: vsrl.vi v24, v16, 4 +; RV64-NEXT: vadd.vv v16, v16, v24 +; RV64-NEXT: vand.vx v16, v16, a4 +; RV64-NEXT: vmul.vx v16, v16, a5 +; RV64-NEXT: li a6, 56 +; RV64-NEXT: vsrl.vx v16, v16, a6 +; RV64-NEXT: bltu a0, a1, .LBB47_2 +; RV64-NEXT: # %bb.1: +; RV64-NEXT: mv a0, a1 +; RV64-NEXT: .LBB47_2: +; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; RV64-NEXT: vsrl.vi v24, v8, 1 +; RV64-NEXT: vand.vx v24, v24, a2 +; RV64-NEXT: vsub.vv v8, v8, v24 +; RV64-NEXT: vand.vx v24, v8, a3 +; RV64-NEXT: vsrl.vi v8, v8, 2 +; RV64-NEXT: vand.vx v8, v8, a3 +; RV64-NEXT: vadd.vv v8, v24, v8 +; RV64-NEXT: vsrl.vi v24, v8, 4 +; RV64-NEXT: vadd.vv v8, v8, v24 +; RV64-NEXT: vand.vx v8, v8, a4 +; RV64-NEXT: vmul.vx v8, v8, a5 +; RV64-NEXT: vsrl.vx v8, v8, a6 +; RV64-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + 
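; %head/%m splat i1 true across the <vscale x 16 x i1> mask: vp intrinsics take no maskless form, so the "unmasked" tests pass an all-true mask and let %evl alone bound the active lanes. +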
%v = call @llvm.vp.ctpop.nxv16i64( %va, %m, i32 %evl) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-ctpop-vp.ll @@ -0,0 +1,2238 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK,RV64 + +declare <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8>, <2 x i1>, i32) + +define <2 x i8> @vp_ctpop_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl) + ret <2 x i8> %v +} + +define <2 x i8> @vp_ctpop_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.ctpop.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl) + ret <2 x i8> %v +} + +declare <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8>, <4 x i1>, i32) + +define <4 x i8> @vp_ctpop_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl) + ret <4 x i8> %v +} + +define <4 x i8> @vp_ctpop_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 
51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.ctpop.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl) + ret <4 x i8> %v +} + +declare <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8>, <8 x i1>, i32) + +define <8 x i8> @vp_ctpop_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl) + ret <8 x i8> %v +} + +define <8 x i8> @vp_ctpop_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.ctpop.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl) + ret <8 x i8> %v +} + +declare <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8>, <16 x i1>, i32) + +define <16 x i8> @vp_ctpop_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1, v0.t +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0, v0.t +; CHECK-NEXT: vsub.vv v8, v8, v9, v0.t +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0, v0.t +; CHECK-NEXT: vsrl.vi v8, v8, 2, v0.t +; CHECK-NEXT: vand.vx v8, v8, a0, v0.t +; CHECK-NEXT: vadd.vv v8, v9, v8, v0.t +; CHECK-NEXT: vsrl.vi v9, v8, 4, v0.t +; CHECK-NEXT: vadd.vv v8, v8, v9, v0.t +; CHECK-NEXT: vand.vi v8, v8, 15, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.ctpop.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl) + ret <16 x i8> %v +} + +define <16 x i8> @vp_ctpop_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_ctpop_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vsrl.vi v9, v8, 1 +; CHECK-NEXT: li a0, 85 +; CHECK-NEXT: vand.vx v9, v9, a0 +; CHECK-NEXT: vsub.vv v8, v8, v9 +; CHECK-NEXT: li a0, 51 +; CHECK-NEXT: vand.vx v9, v8, a0 +; CHECK-NEXT: vsrl.vi v8, v8, 2 +; CHECK-NEXT: vand.vx v8, v8, a0 +; CHECK-NEXT: vadd.vv v8, v9, v8 +; CHECK-NEXT: vsrl.vi v9, v8, 4 +; CHECK-NEXT: vadd.vv v8, v8, v9 +; CHECK-NEXT: vand.vi v8, v8, 15 +; CHECK-NEXT: ret + %head 
+
+declare <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16>, <2 x i1>, i32)
+
+define <2 x i16> @vp_ctpop_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
+
+define <2 x i16> @vp_ctpop_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf4, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i16> @llvm.vp.ctpop.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i16> %v
+}
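+; NOTE (reviewer annotation): for e16 elements the expansion's magic masks
+; are 0x5555 (lui 5 + addi 1365), 0x3333 (lui 3 + addi 819) and 0x0F0F
+; (lui 1 + addi -241); the final multiply by 0x0101 (li 257) and shift
+; right by 8 sums the two per-byte counts.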
+
+declare <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16>, <4 x i1>, i32)
+
+define <4 x i16> @vp_ctpop_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+define <4 x i16> @vp_ctpop_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i16> @llvm.vp.ctpop.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i16> %v
+}
+
+declare <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16>, <8 x i1>, i32)
+
+define <8 x i16> @vp_ctpop_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+define <8 x i16> @vp_ctpop_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i16> @llvm.vp.ctpop.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i16> %v
+}
+
+declare <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16>, <16 x i1>, i32)
+
+define <16 x i16> @vp_ctpop_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i16:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i16:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 8, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
+
+define <16 x i16> @vp_ctpop_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i16_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 5
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 3
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 1
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: li a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 8
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i16_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e16, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 5
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 3
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 1
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: li a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 8
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i16> @llvm.vp.ctpop.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i16> %v
+}
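+; NOTE (reviewer annotation): for e32 elements the same bithack uses
+; 0x55555555 (lui 349525 + addi(w) 1365), 0x33333333 (lui 209715 + 819) and
+; 0x0F0F0F0F (lui 61681 - 241), then multiplies by 0x01010101 (lui 4112 +
+; 257) and shifts right by 24 to sum the four byte counts.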
+
+declare <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32>, <2 x i1>, i32)
+
+define <2 x i32> @vp_ctpop_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+define <2 x i32> @vp_ctpop_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, mf2, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i32> @llvm.vp.ctpop.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i32> %v
+}
+
+declare <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32>, <4 x i1>, i32)
+
+define <4 x i32> @vp_ctpop_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+define <4 x i32> @vp_ctpop_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v9, v9, a0
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v9, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v9, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m1, ta, ma
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v9, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i32> @llvm.vp.ctpop.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i32> %v
+}
+
+declare <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32>, <8 x i1>, i32)
+
+define <8 x i32> @vp_ctpop_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+define <8 x i32> @vp_ctpop_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v10, v10, a0
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v10, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m2, ta, ma
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v10, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i32> @llvm.vp.ctpop.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i32> %v
+}
+
+declare <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32>, <16 x i1>, i32)
+
+define <16 x i32> @vp_ctpop_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i32:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0, v0.t
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i32:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 24, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
+
+define <16 x i32> @vp_ctpop_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i32_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a0, 349525
+; RV32-NEXT: addi a0, a0, 1365
+; RV32-NEXT: vand.vx v12, v12, a0
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a0, 209715
+; RV32-NEXT: addi a0, a0, 819
+; RV32-NEXT: vand.vx v12, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a0, 61681
+; RV32-NEXT: addi a0, a0, -241
+; RV32-NEXT: vand.vx v8, v8, a0
+; RV32-NEXT: lui a0, 4112
+; RV32-NEXT: addi a0, a0, 257
+; RV32-NEXT: vmul.vx v8, v8, a0
+; RV32-NEXT: vsrl.vi v8, v8, 24
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i32_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e32, m4, ta, ma
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: lui a0, 349525
+; RV64-NEXT: addiw a0, a0, 1365
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: lui a0, 209715
+; RV64-NEXT: addiw a0, a0, 819
+; RV64-NEXT: vand.vx v12, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: lui a0, 61681
+; RV64-NEXT: addiw a0, a0, -241
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: lui a0, 4112
+; RV64-NEXT: addiw a0, a0, 257
+; RV64-NEXT: vmul.vx v8, v8, a0
+; RV64-NEXT: vsrl.vi v8, v8, 24
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i32> @llvm.vp.ctpop.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i32> %v
+}
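+; NOTE (reviewer annotation): for e64 elements RV64 loads the four 64-bit
+; constants from the constant pool (.LCPI*_0 through .LCPI*_3) and shifts
+; right by 56, while RV32 has no 64-bit scalar registers and instead splats
+; each 32-bit half-pattern with vmv.v.x at e32 (note the doubled AVL in the
+; interleaved vsetivli/vsetvli sequences) before the e64 vector ops.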
+
+declare <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64>, <2 x i1>, i32)
+
+define <2 x i64> @vp_ctpop_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV32-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI24_0)
+; RV64-NEXT: ld a0, %lo(.LCPI24_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI24_1)
+; RV64-NEXT: ld a1, %lo(.LCPI24_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1, v0.t
+; RV64-NEXT: vand.vx v9, v9, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v9, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v9, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI24_2)
+; RV64-NEXT: ld a0, %lo(.LCPI24_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI24_3)
+; RV64-NEXT: ld a1, %lo(.LCPI24_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v9, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+define <2 x i64> @vp_ctpop_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v2i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vsrl.vi v9, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v9, v9, v10
+; RV32-NEXT: vsub.vv v8, v8, v9
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v10, v8, v9
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: vadd.vv v8, v10, v8
+; RV32-NEXT: vsrl.vi v9, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v9
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v9
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 4, e32, m1, ta, ma
+; RV32-NEXT: vmv.v.x v9, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v9
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v2i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m1, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI25_0)
+; RV64-NEXT: ld a0, %lo(.LCPI25_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI25_1)
+; RV64-NEXT: ld a1, %lo(.LCPI25_1)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 1
+; RV64-NEXT: vand.vx v9, v9, a0
+; RV64-NEXT: vsub.vv v8, v8, v9
+; RV64-NEXT: vand.vx v9, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v9, v8
+; RV64-NEXT: lui a0, %hi(.LCPI25_2)
+; RV64-NEXT: ld a0, %lo(.LCPI25_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI25_3)
+; RV64-NEXT: ld a1, %lo(.LCPI25_3)(a1)
+; RV64-NEXT: vsrl.vi v9, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v9
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <2 x i1> poison, i1 true, i32 0
+ %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer
+ %v = call <2 x i64> @llvm.vp.ctpop.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl)
+ ret <2 x i64> %v
+}
+
+declare <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64>, <4 x i1>, i32)
+
+define <4 x i64> @vp_ctpop_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV32-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI26_0)
+; RV64-NEXT: ld a0, %lo(.LCPI26_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI26_1)
+; RV64-NEXT: ld a1, %lo(.LCPI26_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1, v0.t
+; RV64-NEXT: vand.vx v10, v10, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v10, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v10, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI26_2)
+; RV64-NEXT: ld a0, %lo(.LCPI26_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI26_3)
+; RV64-NEXT: ld a1, %lo(.LCPI26_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v10, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+define <4 x i64> @vp_ctpop_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v4i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vsrl.vi v10, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v10, v10, v12
+; RV32-NEXT: vsub.vv v8, v8, v10
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v12, v8, v10
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: vadd.vv v8, v12, v8
+; RV32-NEXT: vsrl.vi v10, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v10
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v10
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 8, e32, m2, ta, ma
+; RV32-NEXT: vmv.v.x v10, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v10
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v4i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m2, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI27_0)
+; RV64-NEXT: ld a0, %lo(.LCPI27_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI27_1)
+; RV64-NEXT: ld a1, %lo(.LCPI27_1)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 1
+; RV64-NEXT: vand.vx v10, v10, a0
+; RV64-NEXT: vsub.vv v8, v8, v10
+; RV64-NEXT: vand.vx v10, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v10, v8
+; RV64-NEXT: lui a0, %hi(.LCPI27_2)
+; RV64-NEXT: ld a0, %lo(.LCPI27_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI27_3)
+; RV64-NEXT: ld a1, %lo(.LCPI27_3)(a1)
+; RV64-NEXT: vsrl.vi v10, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v10
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <4 x i1> poison, i1 true, i32 0
+ %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer
+ %v = call <4 x i64> @llvm.vp.ctpop.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl)
+ ret <4 x i64> %v
+}
+
+declare <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64>, <8 x i1>, i32)
+
+define <8 x i64> @vp_ctpop_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI28_0)
+; RV64-NEXT: ld a0, %lo(.LCPI28_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI28_1)
+; RV64-NEXT: ld a1, %lo(.LCPI28_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1, v0.t
+; RV64-NEXT: vand.vx v12, v12, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v12, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v12, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI28_2)
+; RV64-NEXT: ld a0, %lo(.LCPI28_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI28_3)
+; RV64-NEXT: ld a1, %lo(.LCPI28_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v12, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+define <8 x i64> @vp_ctpop_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v8i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vsrl.vi v12, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v12, v12, v16
+; RV32-NEXT: vsub.vv v8, v8, v12
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v16, v8, v12
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: vadd.vv v8, v16, v8
+; RV32-NEXT: vsrl.vi v12, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v12
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v12
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetivli zero, 16, e32, m4, ta, ma
+; RV32-NEXT: vmv.v.x v12, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v12
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v8i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m4, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI29_0)
+; RV64-NEXT: ld a0, %lo(.LCPI29_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI29_1)
+; RV64-NEXT: ld a1, %lo(.LCPI29_1)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 1
+; RV64-NEXT: vand.vx v12, v12, a0
+; RV64-NEXT: vsub.vv v8, v8, v12
+; RV64-NEXT: vand.vx v12, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v12, v8
+; RV64-NEXT: lui a0, %hi(.LCPI29_2)
+; RV64-NEXT: ld a0, %lo(.LCPI29_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI29_3)
+; RV64-NEXT: ld a1, %lo(.LCPI29_3)(a1)
+; RV64-NEXT: vsrl.vi v12, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v12
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <8 x i1> poison, i1 true, i32 0
+ %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer
+ %v = call <8 x i64> @llvm.vp.ctpop.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl)
+ ret <8 x i64> %v
+}
+
+declare <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64>, <15 x i1>, i32)
+
+define <15 x i64> @vp_ctpop_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v15i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v15i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI30_0)
+; RV64-NEXT: ld a0, %lo(.LCPI30_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI30_1)
+; RV64-NEXT: ld a1, %lo(.LCPI30_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI30_2)
+; RV64-NEXT: ld a0, %lo(.LCPI30_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI30_3)
+; RV64-NEXT: ld a1, %lo(.LCPI30_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+ ret <15 x i64> %v
+}
+
+define <15 x i64> @vp_ctpop_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v15i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v15i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI31_0)
+; RV64-NEXT: ld a0, %lo(.LCPI31_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI31_1)
+; RV64-NEXT: ld a1, %lo(.LCPI31_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI31_2)
+; RV64-NEXT: ld a0, %lo(.LCPI31_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI31_3)
+; RV64-NEXT: ld a1, %lo(.LCPI31_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <15 x i1> poison, i1 true, i32 0
+ %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer
+ %v = call <15 x i64> @llvm.vp.ctpop.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl)
+ ret <15 x i64> %v
+}
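+; NOTE (reviewer annotation): <15 x i64> is not a power of two, so type
+; legalization widens it to <16 x i64>; the v15i64 checks above therefore
+; match the v16i64 tests below.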
+
+declare <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64>, <16 x i1>, i32)
+
+define <16 x i64> @vp_ctpop_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i64:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24, v0.t
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: vadd.vv v8, v24, v8, v0.t
+; RV32-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16, v0.t
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i64:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI32_0)
+; RV64-NEXT: ld a0, %lo(.LCPI32_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI32_1)
+; RV64-NEXT: ld a1, %lo(.LCPI32_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT: vand.vx v16, v16, a0, v0.t
+; RV64-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v16, v8, a1, v0.t
+; RV64-NEXT: vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT: vand.vx v8, v8, a1, v0.t
+; RV64-NEXT: vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT: lui a0, %hi(.LCPI32_2)
+; RV64-NEXT: ld a0, %lo(.LCPI32_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI32_3)
+; RV64-NEXT: ld a1, %lo(.LCPI32_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT: vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT: vand.vx v8, v8, a0, v0.t
+; RV64-NEXT: vmul.vx v8, v8, a1, v0.t
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0, v0.t
+; RV64-NEXT: ret
+ %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
+
+define <16 x i64> @vp_ctpop_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v16i64_unmasked:
+; RV32: # %bb.0:
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1
+; RV32-NEXT: lui a1, 349525
+; RV32-NEXT: addi a1, a1, 1365
+; RV32-NEXT: li a2, 32
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v24, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v24
+; RV32-NEXT: vsub.vv v8, v8, v16
+; RV32-NEXT: lui a1, 209715
+; RV32-NEXT: addi a1, a1, 819
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v24, v8, v16
+; RV32-NEXT: vsrl.vi v8, v8, 2
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: vadd.vv v8, v24, v8
+; RV32-NEXT: vsrl.vi v16, v8, 4
+; RV32-NEXT: vadd.vv v8, v8, v16
+; RV32-NEXT: lui a1, 61681
+; RV32-NEXT: addi a1, a1, -241
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v8, v16
+; RV32-NEXT: lui a1, 4112
+; RV32-NEXT: addi a1, a1, 257
+; RV32-NEXT: vsetvli zero, a2, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a1
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16
+; RV32-NEXT: li a0, 56
+; RV32-NEXT: vsrl.vx v8, v8, a0
+; RV32-NEXT: ret
+;
+; RV64-LABEL: vp_ctpop_v16i64_unmasked:
+; RV64: # %bb.0:
+; RV64-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT: lui a0, %hi(.LCPI33_0)
+; RV64-NEXT: ld a0, %lo(.LCPI33_0)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI33_1)
+; RV64-NEXT: ld a1, %lo(.LCPI33_1)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 1
+; RV64-NEXT: vand.vx v16, v16, a0
+; RV64-NEXT: vsub.vv v8, v8, v16
+; RV64-NEXT: vand.vx v16, v8, a1
+; RV64-NEXT: vsrl.vi v8, v8, 2
+; RV64-NEXT: vand.vx v8, v8, a1
+; RV64-NEXT: vadd.vv v8, v16, v8
+; RV64-NEXT: lui a0, %hi(.LCPI33_2)
+; RV64-NEXT: ld a0, %lo(.LCPI33_2)(a0)
+; RV64-NEXT: lui a1, %hi(.LCPI33_3)
+; RV64-NEXT: ld a1, %lo(.LCPI33_3)(a1)
+; RV64-NEXT: vsrl.vi v16, v8, 4
+; RV64-NEXT: vadd.vv v8, v8, v16
+; RV64-NEXT: vand.vx v8, v8, a0
+; RV64-NEXT: vmul.vx v8, v8, a1
+; RV64-NEXT: li a0, 56
+; RV64-NEXT: vsrl.vx v8, v8, a0
+; RV64-NEXT: ret
+ %head = insertelement <16 x i1> poison, i1 true, i32 0
+ %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer
+ %v = call <16 x i64> @llvm.vp.ctpop.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl)
+ ret <16 x i64> %v
+}
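+; NOTE (reviewer annotation): <32 x i64> does not fit a single LMUL=8
+; register group, so the operation is split at 16 elements: the EVL is
+; clamped to 16 for the low half, the high half's EVL is derived with
+; sltu/addi/and, the mask is slid down by two bytes (16 bits), and on RV32
+; the constant splats force Unknown-size folded spills/reloads of m8 groups.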
+
+declare <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64>, <32 x i1>, i32)
+
+define <32 x i64> @vp_ctpop_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v32i64:
+; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 56
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: sub sp, sp, a1
+; RV32-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x38, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 56 * vlenb
+; RV32-NEXT: csrr a1, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a1, a1, a2
+; RV32-NEXT: add a1, sp, a1
+; RV32-NEXT: addi a1, a1, 16
+; RV32-NEXT: vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT: vsetivli zero, 2, e8, mf4, ta, ma
+; RV32-NEXT: li a2, 16
+; RV32-NEXT: vslidedown.vi v24, v0, 2
+; RV32-NEXT: mv a1, a0
+; RV32-NEXT: bltu a0, a2, .LBB34_2
+; RV32-NEXT: # %bb.1:
+; RV32-NEXT: li a1, 16
+; RV32-NEXT: .LBB34_2:
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vsrl.vi v16, v8, 1, v0.t
+; RV32-NEXT: lui a2, 349525
+; RV32-NEXT: addi a2, a2, 1365
+; RV32-NEXT: li a3, 32
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: csrr a4, vlenb
+; RV32-NEXT: li a5, 40
+; RV32-NEXT: mul a4, a4, a5
+; RV32-NEXT: add a4, sp, a4
+; RV32-NEXT: addi a4, a4, 16
+; RV32-NEXT: vs8r.v v8, (a4) # Unknown-size Folded Spill
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 24
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 40
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v8, v16, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: lui a2, 209715
+; RV32-NEXT: addi a2, a2, 819
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: li a4, 40
+; RV32-NEXT: mul a2, a2, a4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v16, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v16, v16, 2, v0.t
+; RV32-NEXT: vand.vv v16, v16, v8, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vl8r.v v8, (a2) # Unknown-size Folded Reload
+; RV32-NEXT: vadd.vv v16, v8, v16, v0.t
+; RV32-NEXT: vsrl.vi v8, v16, 4, v0.t
+; RV32-NEXT: vadd.vv v16, v16, v8, v0.t
+; RV32-NEXT: lui a2, 61681
+; RV32-NEXT: addi a2, a2, -241
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v8, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 5
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: lui a2, 4112
+; RV32-NEXT: addi a2, a2, 257
+; RV32-NEXT: vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT: vmv.v.x v16, a2
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 3
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT: vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT: li a1, 56
+; RV32-NEXT: vsrl.vx v8, v8, a1, v0.t
+; RV32-NEXT: csrr a2, vlenb
+; RV32-NEXT: slli a2, a2, 4
+; RV32-NEXT: add a2, sp, a2
+; RV32-NEXT: addi a2, a2, 16
+; RV32-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT: addi a2, a0, -16
+; RV32-NEXT: sltu a0, a0, a2
+; RV32-NEXT: addi a0, a0, -1
+; RV32-NEXT: and a0, a0, a2
+; RV32-NEXT: vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT: vmv1r.v v0, v24
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsrl.vi v8, v8, 1, v0.t
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vs8r.v v8, (a0) # Unknown-size Folded Spill
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 24
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: addi a0, sp, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vand.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 48
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT: add a0, sp, a0
+; RV32-NEXT: addi a0, a0, 16
+; RV32-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT: vsub.vv v8, v16, v8, v0.t
+; RV32-NEXT: csrr a0, vlenb
+; RV32-NEXT: li a2, 40
+; RV32-NEXT: mul a0, a0, a2
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v16, v8, v16, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a2, 48
+; RV32-NEXT:    mul a0, a0, a2
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vs8r.v v16, (a0) # Unknown-size Folded Spill
+; RV32-NEXT:    vsrl.vi v8, v8, 2, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    mul a0, a0, a2
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a2, 48
+; RV32-NEXT:    mul a0, a0, a2
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vadd.vv v8, v16, v8, v0.t
+; RV32-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV32-NEXT:    vadd.vv v8, v8, v16, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v16, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vmul.vv v8, v8, v16, v0.t
+; RV32-NEXT:    vsrl.vx v16, v8, a1, v0.t
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_ctpop_v32i64:
+; RV64:       # %bb.0:
+; RV64-NEXT:    addi sp, sp, -16
+; RV64-NEXT:    .cfi_def_cfa_offset 16
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 4
+; RV64-NEXT:    sub sp, sp, a1
+; RV64-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb
+; RV64-NEXT:    csrr a1, vlenb
+; RV64-NEXT:    slli a1, a1, 3
+; RV64-NEXT:    add a1, sp, a1
+; RV64-NEXT:    addi a1, a1, 16
+; RV64-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV64-NEXT:    vsetivli zero, 2, e8, mf4, ta, ma
+; RV64-NEXT:    li a2, 16
+; RV64-NEXT:    vslidedown.vi v24, v0, 2
+; RV64-NEXT:    mv a1, a0
+; RV64-NEXT:    bltu a0, a2, .LBB34_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    li a1, 16
+; RV64-NEXT:  .LBB34_2:
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    lui a1, %hi(.LCPI34_0)
+; RV64-NEXT:    ld a1, %lo(.LCPI34_0)(a1)
+; RV64-NEXT:    lui a2, %hi(.LCPI34_1)
+; RV64-NEXT:    ld a2, %lo(.LCPI34_1)(a2)
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsub.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vand.vx v16, v8, a2, v0.t
+; RV64-NEXT:    vsrl.vi v8, v8, 2, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a2, v0.t
+; RV64-NEXT:    vadd.vv v8, v16, v8, v0.t
+; RV64-NEXT:    lui a3, %hi(.LCPI34_2)
+; RV64-NEXT:    ld a3, %lo(.LCPI34_2)(a3)
+; RV64-NEXT:    lui a4, %hi(.LCPI34_3)
+; RV64-NEXT:    ld a4, %lo(.LCPI34_3)(a4)
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vmul.vx v8, v8, a4, v0.t
+; RV64-NEXT:    li a5, 56
+; RV64-NEXT:    vsrl.vx v8, v8, a5, v0.t
+; RV64-NEXT:    addi a6, sp, 16
+; RV64-NEXT:    vs8r.v v8, (a6) # Unknown-size Folded Spill
+; RV64-NEXT:    addi a6, a0, -16
+; RV64-NEXT:    sltu a0, a0, a6
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a0, a0, a6
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vmv1r.v v0, v24
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 3
+; RV64-NEXT:    add a0, sp, a0
+; RV64-NEXT:    addi a0, a0, 16
+; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    vsrl.vi v16, v8, 1, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a1, v0.t
+; RV64-NEXT:    vsub.vv v16, v8, v16, v0.t
+; RV64-NEXT:    vand.vx v8, v16, a2, v0.t
+; RV64-NEXT:    vsrl.vi v16, v16, 2, v0.t
+; RV64-NEXT:    vand.vx v16, v16, a2, v0.t
+; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vsrl.vi v16, v8, 4, v0.t
+; RV64-NEXT:    vadd.vv v8, v8, v16, v0.t
+; RV64-NEXT:    vand.vx v8, v8, a3, v0.t
+; RV64-NEXT:    vmul.vx v8, v8, a4, v0.t
+; RV64-NEXT:    vsrl.vx v16, v8, a5, v0.t
+; RV64-NEXT:    addi a0, sp, 16
+; RV64-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV64-NEXT:    csrr a0, vlenb
+; RV64-NEXT:    slli a0, a0, 4
+; RV64-NEXT:    add sp, sp, a0
+; RV64-NEXT:    addi sp, sp, 16
+; RV64-NEXT:    ret
+  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x i64> %v
+}
+
+define <32 x i64> @vp_ctpop_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) {
+; RV32-LABEL: vp_ctpop_v32i64_unmasked:
+; RV32:       # %bb.0:
+; RV32-NEXT:    addi sp, sp, -16
+; RV32-NEXT:    .cfi_def_cfa_offset 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    li a2, 40
+; RV32-NEXT:    mul a1, a1, a2
+; RV32-NEXT:    sub sp, sp, a1
+; RV32-NEXT:    .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x28, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 40 * vlenb
+; RV32-NEXT:    li a2, 16
+; RV32-NEXT:    csrr a1, vlenb
+; RV32-NEXT:    slli a1, a1, 5
+; RV32-NEXT:    add a1, sp, a1
+; RV32-NEXT:    addi a1, a1, 16
+; RV32-NEXT:    vs8r.v v16, (a1) # Unknown-size Folded Spill
+; RV32-NEXT:    mv a1, a0
+; RV32-NEXT:    bltu a0, a2, .LBB35_2
+; RV32-NEXT:  # %bb.1:
+; RV32-NEXT:    li a1, 16
+; RV32-NEXT:  .LBB35_2:
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vsrl.vi v16, v8, 1
+; RV32-NEXT:    lui a2, 349525
+; RV32-NEXT:    addi a2, a2, 1365
+; RV32-NEXT:    li a3, 32
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v24, a2
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    li a4, 24
+; RV32-NEXT:    mul a2, a2, a4
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vs8r.v v24, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v16, v24
+; RV32-NEXT:    vsub.vv v8, v8, v16
+; RV32-NEXT:    lui a2, 209715
+; RV32-NEXT:    addi a2, a2, 819
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v0, a2
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v16, v8, v0
+; RV32-NEXT:    vsrl.vi v8, v8, 2
+; RV32-NEXT:    vand.vv v8, v8, v0
+; RV32-NEXT:    vadd.vv v8, v16, v8
+; RV32-NEXT:    vsrl.vi v16, v8, 4
+; RV32-NEXT:    vadd.vv v8, v8, v16
+; RV32-NEXT:    lui a2, 61681
+; RV32-NEXT:    addi a2, a2, -241
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v16, a2
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 4
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vs8r.v v16, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vand.vv v24, v8, v16
+; RV32-NEXT:    lui a2, 4112
+; RV32-NEXT:    addi a2, a2, 257
+; RV32-NEXT:    vsetvli zero, a3, e32, m8, ta, ma
+; RV32-NEXT:    vmv.v.x v8, a2
+; RV32-NEXT:    addi a2, sp, 16
+; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV32-NEXT:    vmul.vv v24, v24, v8
+; RV32-NEXT:    li a1, 56
+; RV32-NEXT:    vsrl.vx v8, v24, a1
+; RV32-NEXT:    csrr a2, vlenb
+; RV32-NEXT:    slli a2, a2, 3
+; RV32-NEXT:    add a2, sp, a2
+; RV32-NEXT:    addi a2, a2, 16
+; RV32-NEXT:    vs8r.v v8, (a2) # Unknown-size Folded Spill
+; RV32-NEXT:    addi a2, a0, -16
+; RV32-NEXT:    sltu a0, a0, a2
+; RV32-NEXT:    addi a0, a0, -1
+; RV32-NEXT:    and a0, a0, a2
+; RV32-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 5
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vsrl.vi v24, v8, 1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a2, 24
+; RV32-NEXT:    mul a0, a0, a2
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v24, v24, v16
+; RV32-NEXT:    vsub.vv v24, v8, v24
+; RV32-NEXT:    vand.vv v8, v24, v0
+; RV32-NEXT:    vsrl.vi v24, v24, 2
+; RV32-NEXT:    vand.vv v24, v24, v0
+; RV32-NEXT:    vadd.vv v8, v8, v24
+; RV32-NEXT:    vsrl.vi v24, v8, 4
+; RV32-NEXT:    vadd.vv v8, v8, v24
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 4
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vand.vv v8, v8, v16
+; RV32-NEXT:    addi a0, sp, 16
+; RV32-NEXT:    vl8r.v v16, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    vmul.vv v8, v8, v16
+; RV32-NEXT:    vsrl.vx v16, v8, a1
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    slli a0, a0, 3
+; RV32-NEXT:    add a0, sp, a0
+; RV32-NEXT:    addi a0, a0, 16
+; RV32-NEXT:    vl8r.v v8, (a0) # Unknown-size Folded Reload
+; RV32-NEXT:    csrr a0, vlenb
+; RV32-NEXT:    li a1, 40
+; RV32-NEXT:    mul a0, a0, a1
+; RV32-NEXT:    add sp, sp, a0
+; RV32-NEXT:    addi sp, sp, 16
+; RV32-NEXT:    ret
+;
+; RV64-LABEL: vp_ctpop_v32i64_unmasked:
+; RV64:       # %bb.0:
+; RV64-NEXT:    li a2, 16
+; RV64-NEXT:    mv a1, a0
+; RV64-NEXT:    bltu a0, a2, .LBB35_2
+; RV64-NEXT:  # %bb.1:
+; RV64-NEXT:    li a1, 16
+; RV64-NEXT:  .LBB35_2:
+; RV64-NEXT:    vsetvli zero, a1, e64, m8, ta, ma
+; RV64-NEXT:    lui a1, %hi(.LCPI35_0)
+; RV64-NEXT:    ld a1, %lo(.LCPI35_0)(a1)
+; RV64-NEXT:    lui a2, %hi(.LCPI35_1)
+; RV64-NEXT:    ld a2, %lo(.LCPI35_1)(a2)
+; RV64-NEXT:    vsrl.vi v24, v8, 1
+; RV64-NEXT:    vand.vx v24, v24, a1
+; RV64-NEXT:    vsub.vv v8, v8, v24
+; RV64-NEXT:    vand.vx v24, v8, a2
+; RV64-NEXT:    vsrl.vi v8, v8, 2
+; RV64-NEXT:    vand.vx v8, v8, a2
+; RV64-NEXT:    vadd.vv v8, v24, v8
+; RV64-NEXT:    lui a3, %hi(.LCPI35_2)
+; RV64-NEXT:    ld a3, %lo(.LCPI35_2)(a3)
+; RV64-NEXT:    lui a4, %hi(.LCPI35_3)
+; RV64-NEXT:    ld a4, %lo(.LCPI35_3)(a4)
+; RV64-NEXT:    vsrl.vi v24, v8, 4
+; RV64-NEXT:    vadd.vv v8, v8, v24
+; RV64-NEXT:    vand.vx v8, v8, a3
+; RV64-NEXT:    vmul.vx v8, v8, a4
+; RV64-NEXT:    li a5, 56
+; RV64-NEXT:    vsrl.vx v8, v8, a5
+; RV64-NEXT:    addi a6, a0, -16
+; RV64-NEXT:    sltu a0, a0, a6
+; RV64-NEXT:    addi a0, a0, -1
+; RV64-NEXT:    and a0, a0, a6
+; RV64-NEXT:    vsetvli zero, a0, e64, m8, ta, ma
+; RV64-NEXT:    vsrl.vi v24, v16, 1
+; RV64-NEXT:    vand.vx v24, v24, a1
+; RV64-NEXT:    vsub.vv v16, v16, v24
+; RV64-NEXT:    vand.vx v24, v16, a2
+; RV64-NEXT:    vsrl.vi v16, v16, 2
+; RV64-NEXT:    vand.vx v16, v16, a2
+; RV64-NEXT:    vadd.vv v16, v24, v16
+; RV64-NEXT:    vsrl.vi v24, v16, 4
+; RV64-NEXT:    vadd.vv v16, v16, v24
+; RV64-NEXT:    vand.vx v16, v16, a3
+; RV64-NEXT:    vmul.vx v16, v16, a4
+; RV64-NEXT:    vsrl.vx v16, v16, a5
+; RV64-NEXT:    ret
+  %head = insertelement <32 x i1> poison, i1 true, i32 0
+  %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer
+  %v = call <32 x i64> @llvm.vp.ctpop.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl)
+  ret <32 x i64> %v
+}
diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp
--- a/llvm/unittests/IR/VPIntrinsicTest.cpp
+++ b/llvm/unittests/IR/VPIntrinsicTest.cpp
@@ -150,6 +150,8 @@
       << "(<8 x i16>, <8 x i1>, i32) ";
   Str << " declare <8 x i16> @llvm.vp.bswap.v8i16"
       << "(<8 x i16>, <8 x i1>, i32) ";
+  Str << " declare <8 x i16> @llvm.vp.ctpop.v8i16"
+      << "(<8 x i16>, <8 x i1>, i32) ";
   Str << " declare <8 x i16> @llvm.vp.fshl.v8i16"
       << "(<8 x i16>, <8 x i16>, <8 x i16>, <8 x i1>, i32) ";
   Str << " declare <8 x i16> @llvm.vp.fshr.v8i16"