diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -13706,6 +13706,7 @@ information about the alignment of the pointer arguments to the code generator,
 providing opportunity for more efficient code generation.
 
+.. _int_abs:
 
 '``llvm.abs.*``' Intrinsic
 ^^^^^^^^^^^^^^^^^^^^^^^^^^
 
@@ -19011,6 +19012,56 @@
   %t = xor <4 x i32> %a, %b
   %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
 
+.. _int_vp_abs:
+
+'``llvm.vp.abs.*``' Intrinsics
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+This is an overloaded intrinsic.
+
+::
+
+      declare <16 x i32>  @llvm.vp.abs.v16i32 (<16 x i32> <op>, <16 x i1> <mask>, i32 <vector_length>, i1 <is_int_min_poison>)
+      declare <vscale x 4 x i32>  @llvm.vp.abs.nxv4i32 (<vscale x 4 x i32> <op>, <vscale x 4 x i1> <mask>, i32 <vector_length>, i1 <is_int_min_poison>)
+      declare <256 x i64>  @llvm.vp.abs.v256i64 (<256 x i64> <op>, <256 x i1> <mask>, i32 <vector_length>, i1 <is_int_min_poison>)
+
+Overview:
+"""""""""
+
+Predicated abs of a vector of integers.
+
+
+Arguments:
+""""""""""
+
+The first operand and the result have the same vector of integer type. The
+second operand is the vector mask and has the same number of elements as the
+result vector type. The third operand is the explicit vector length of the
+operation. The fourth argument must be a constant and is a flag to indicate
+whether the result value of the '``llvm.vp.abs``' intrinsic is a
+:ref:`poison value <poisonvalues>` if the argument is statically or dynamically
+an ``INT_MIN`` value.
+
+Semantics:
+""""""""""
+
+The '``llvm.vp.abs``' intrinsic performs abs (:ref:`abs <int_abs>`) of the first operand on each
+enabled lane. The result on disabled lanes is a :ref:`poison value <poisonvalues>`.
+
+Examples:
+"""""""""
+
+.. code-block:: llvm
+
+      %r = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl, i1 false)
+      ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r
+
+      %t = call <4 x i32> @llvm.abs.v4i32(<4 x i32> %a, i1 false)
+      %also.r = select <4 x i1> %mask, <4 x i32> %t, <4 x i32> poison
+
+
 .. _int_vp_smax:
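Beyond the fixed-width example in the hunk above, a minimal sketch of a scalable-vector use of the new intrinsic may help; the function and value names below are illustrative only and are not part of this patch. With ``is_int_min_poison`` set to false, each enabled lane computes ``smax(x, sub(0, x))``, which is also why the RISC-V tests below check for a ``vrsub.vi``/``vmax.vv`` pair.

.. code-block:: llvm

      ; Illustrative helper: absolute value of the first %evl lanes of %v.
      ; Lanes disabled by %m (and lanes at or beyond %evl) are poison.
      define <vscale x 4 x i32> @abs_active_lanes(<vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 zeroext %evl) {
        ; is_int_min_poison is false, so an INT_MIN input lane stays INT_MIN
        ; rather than becoming poison.
        %r = call <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32> %v, <vscale x 4 x i1> %m, i32 %evl, i1 false)
        ret <vscale x 4 x i32> %r
      }

      declare <vscale x 4 x i32> @llvm.vp.abs.nxv4i32(<vscale x 4 x i32>, <vscale x 4 x i1>, i32, i1 immarg)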
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -1538,6 +1538,11 @@
                                            LLVMMatchType<0>,
                                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                            llvm_i32_ty]>;
+  def int_vp_abs : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
+                                         [ LLVMMatchType<0>,
+                                           LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
+                                           llvm_i32_ty,
+                                           llvm_i1_ty]>;
   def int_vp_smin : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                                           [ LLVMMatchType<0>,
                                             LLVMMatchType<0>,
@@ -1722,7 +1727,6 @@
                                          [ llvm_anyvector_ty,
                                            LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
                                            llvm_i32_ty]>;
-
 // Shuffles
 def int_vp_select : DefaultAttrsIntrinsic<[ llvm_anyvector_ty ],
                                           [ LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>,
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -216,6 +216,10 @@
 VP_PROPERTY_BINARYOP
 END_REGISTER_VP(vp_umax, VP_UMAX)
 
+// llvm.vp.abs(x,mask,vlen,is_int_min_poison)
+BEGIN_REGISTER_VP(vp_abs, 1, 2, VP_ABS, -1)
+END_REGISTER_VP(vp_abs, VP_ABS)
+
 // llvm.vp.bswap(x,mask,vlen)
 BEGIN_REGISTER_VP(vp_bswap, 1, 2, VP_BSWAP, -1)
 END_REGISTER_VP(vp_bswap, VP_BSWAP)
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -1011,6 +1011,7 @@
     break;
 
   case ISD::ABS:
+  case ISD::VP_ABS:
   case ISD::BITREVERSE:
   case ISD::VP_BITREVERSE:
   case ISD::BSWAP:
@@ -4096,6 +4097,7 @@
     [[fallthrough]];
 
   case ISD::ABS:
+  case ISD::VP_ABS:
   case ISD::BITREVERSE:
   case ISD::VP_BITREVERSE:
   case ISD::BSWAP:
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7781,11 +7781,13 @@
     setValue(&VPIntrin, N);
     break;
   }
+  case ISD::VP_ABS:
   case ISD::VP_CTLZ:
   case ISD::VP_CTLZ_ZERO_UNDEF:
   case ISD::VP_CTTZ:
   case ISD::VP_CTTZ_ZERO_UNDEF: {
-    // Pop is_zero_poison operand.
+    // Pop is_zero_poison operand for vp.ctlz/vp.cttz or
+    // is_int_min_poison operand for vp.abs.
OpValues.pop_back(); SDValue Result = DAG.getNode(Opcode, DL, VTs, OpValues); setValue(&VPIntrin, Result); diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp --- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp @@ -492,7 +492,8 @@ ISD::VP_MERGE, ISD::VP_SELECT, ISD::VP_FP_TO_SINT, ISD::VP_FP_TO_UINT, ISD::VP_SETCC, ISD::VP_SIGN_EXTEND, ISD::VP_ZERO_EXTEND, ISD::VP_TRUNCATE, ISD::VP_SMIN, - ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX}; + ISD::VP_SMAX, ISD::VP_UMIN, ISD::VP_UMAX, + ISD::VP_ABS}; static const unsigned FloatingPointVPOps[] = { ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL, @@ -4177,6 +4178,7 @@ return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL, /*HasMergeOp*/ true); case ISD::ABS: + case ISD::VP_ABS: return lowerABS(Op, DAG); case ISD::CTLZ_ZERO_UNDEF: case ISD::CTTZ_ZERO_UNDEF: @@ -6765,12 +6767,21 @@ MVT VT = Op.getSimpleValueType(); SDValue X = Op.getOperand(0); - assert(VT.isFixedLengthVector() && "Unexpected type"); + assert((Op.getOpcode() == ISD::VP_ABS || VT.isFixedLengthVector()) && + "Unexpected type for ISD::ABS"); - MVT ContainerVT = getContainerForFixedLengthVector(VT); - X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); + MVT ContainerVT = VT; + if (VT.isFixedLengthVector()) { + ContainerVT = getContainerForFixedLengthVector(VT); + X = convertToScalableVector(ContainerVT, X, DAG, Subtarget); + } - auto [Mask, VL] = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); + SDValue Mask, VL; + if (Op->getOpcode() == ISD::VP_ABS) { + Mask = Op->getOperand(1); + VL = Op->getOperand(2); + } else + std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget); SDValue SplatZero = DAG.getNode( RISCVISD::VMV_V_X_VL, DL, ContainerVT, DAG.getUNDEF(ContainerVT), @@ -6780,7 +6791,9 @@ SDValue Max = DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, DAG.getUNDEF(ContainerVT), Mask, VL); - return convertFromScalableVector(VT, Max, DAG, Subtarget); + if (VT.isFixedLengthVector()) + Max = convertFromScalableVector(VT, Max, DAG, Subtarget); + return Max; } SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV( diff --git a/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/abs-vp.ll @@ -0,0 +1,682 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK + +declare @llvm.vp.abs.nxv1i8(, , i32, i1 immarg) + +define @vp_abs_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv1i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv1i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv1i8( %va, 
%m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv2i8(, , i32, i1) + +define @vp_abs_nxv2i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv2i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv2i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv2i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv4i8(, , i32, i1) + +define @vp_abs_nxv4i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv4i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv4i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv4i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv8i8(, , i32, i1) + +define @vp_abs_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv8i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv8i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv16i8(, , i32, i1) + +define @vp_abs_nxv16i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv16i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv16i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv32i8(, , i32, i1) + +define @vp_abs_nxv32i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv32i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call 
@llvm.vp.abs.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv32i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv32i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv32i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv64i8(, , i32, i1) + +define @vp_abs_nxv64i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv64i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv64i8_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv64i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv64i8( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv1i16(, , i32, i1) + +define @vp_abs_nxv1i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv1i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv1i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv2i16(, , i32, i1) + +define @vp_abs_nxv2i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv2i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv2i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv4i16(, , i32, i1) + +define @vp_abs_nxv4i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv4i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = 
insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv4i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv8i16(, , i32, i1) + +define @vp_abs_nxv8i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv8i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv8i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv16i16(, , i32, i1) + +define @vp_abs_nxv16i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv16i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv16i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv32i16(, , i32, i1) + +define @vp_abs_nxv32i16( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv32i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv32i16_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv32i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv32i16( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv1i32(, , i32, i1) + +define @vp_abs_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv1i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv1i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv2i32(, , i32, i1) + +define @vp_abs_nxv2i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i32: +; CHECK: # 
%bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv2i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv2i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv4i32(, , i32, i1) + +define @vp_abs_nxv4i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv4i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv4i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv8i32(, , i32, i1) + +define @vp_abs_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv8i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv8i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv16i32(, , i32, i1) + +define @vp_abs_nxv16i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv16i32_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv16i32( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv1i64(, , i32, i1) + +define @vp_abs_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv1i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv1i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: 
vp_abs_nxv1i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v9 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv1i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv2i64(, , i32, i1) + +define @vp_abs_nxv2i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv2i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv2i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v10 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv2i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv4i64(, , i32, i1) + +define @vp_abs_nxv4i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv4i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv4i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v12 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv4i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv7i64(, , i32, i1) + +define @vp_abs_nxv7i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv7i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv7i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv7i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv7i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv8i64(, , i32, i1) + +define @vp_abs_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv8i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv8i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv8i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v16 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv8i64( %va, %m, 
i32 %evl, i1 true) + ret %v +} + +declare @llvm.vp.abs.nxv16i64(, , i32, i1) + +define @vp_abs_nxv16i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i64: +; CHECK: # %bb.0: +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: .cfi_def_cfa_offset 16 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 4 +; CHECK-NEXT: sub sp, sp, a1 +; CHECK-NEXT: .cfi_escape 0x0f, 0x0d, 0x72, 0x00, 0x11, 0x10, 0x22, 0x11, 0x10, 0x92, 0xa2, 0x38, 0x00, 0x1e, 0x22 # sp + 16 + 16 * vlenb +; CHECK-NEXT: vmv1r.v v24, v0 +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: slli a1, a1, 3 +; CHECK-NEXT: add a1, sp, a1 +; CHECK-NEXT: addi a1, a1, 16 +; CHECK-NEXT: vs8r.v v8, (a1) # Unknown-size Folded Spill +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: srli a2, a1, 3 +; CHECK-NEXT: vsetvli a3, zero, e8, mf4, ta, ma +; CHECK-NEXT: vslidedown.vx v0, v0, a2 +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v8, v16, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v16, v8, v0.t +; CHECK-NEXT: addi a2, sp, 16 +; CHECK-NEXT: vs8r.v v8, (a2) # Unknown-size Folded Spill +; CHECK-NEXT: bltu a0, a1, .LBB46_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB46_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 3 +; CHECK-NEXT: add a0, sp, a0 +; CHECK-NEXT: addi a0, a0, 16 +; CHECK-NEXT: vl8r.v v8, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: addi a0, sp, 16 +; CHECK-NEXT: vl8r.v v16, (a0) # Unknown-size Folded Reload +; CHECK-NEXT: csrr a0, vlenb +; CHECK-NEXT: slli a0, a0, 4 +; CHECK-NEXT: add sp, sp, a0 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: ret + %v = call @llvm.vp.abs.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} + +define @vp_abs_nxv16i64_unmasked( %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_nxv16i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: csrr a1, vlenb +; CHECK-NEXT: sub a2, a0, a1 +; CHECK-NEXT: sltu a3, a0, a2 +; CHECK-NEXT: addi a3, a3, -1 +; CHECK-NEXT: and a2, a3, a2 +; CHECK-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v24, v16, 0 +; CHECK-NEXT: vmax.vv v16, v16, v24 +; CHECK-NEXT: bltu a0, a1, .LBB47_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: mv a0, a1 +; CHECK-NEXT: .LBB47_2: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v24, v8, 0 +; CHECK-NEXT: vmax.vv v8, v8, v24 +; CHECK-NEXT: ret + %head = insertelement poison, i1 true, i32 0 + %m = shufflevector %head, poison, zeroinitializer + %v = call @llvm.vp.abs.nxv16i64( %va, %m, i32 %evl, i1 true) + ret %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-abs-vp.ll @@ -0,0 +1,538 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=ilp32d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK +; RUN: llc -mtriple=riscv64 -mattr=+d,+zfh,+experimental-zvfh,+v,+m -target-abi=lp64d -riscv-v-vector-bits-min=128 \ +; RUN: -verify-machineinstrs < %s | FileCheck %s --check-prefixes=CHECK + +declare <2 x i8> @llvm.vp.abs.v2i8(<2 x i8>, <2 x i1>, i32, i1 immarg) + +define <2 x i8> 
@vp_abs_v2i8(<2 x i8> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +define <2 x i8> @vp_abs_v2i8_unmasked(<2 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i8> @llvm.vp.abs.v2i8(<2 x i8> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i8> %v +} + +declare <4 x i8> @llvm.vp.abs.v4i8(<4 x i8>, <4 x i1>, i32, i1 immarg) + +define <4 x i8> @vp_abs_v4i8(<4 x i8> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +define <4 x i8> @vp_abs_v4i8_unmasked(<4 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i8> @llvm.vp.abs.v4i8(<4 x i8> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i8> %v +} + +declare <8 x i8> @llvm.vp.abs.v8i8(<8 x i8>, <8 x i1>, i32, i1 immarg) + +define <8 x i8> @vp_abs_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +define <8 x i8> @vp_abs_v8i8_unmasked(<8 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i8> @llvm.vp.abs.v8i8(<8 x i8> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i8> %v +} + +declare <16 x i8> @llvm.vp.abs.v16i8(<16 x i8>, <16 x i1>, i32, i1 immarg) + +define <16 x i8> @vp_abs_v16i8(<16 x i8> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +define <16 x i8> 
@vp_abs_v16i8_unmasked(<16 x i8> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i8_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i8> @llvm.vp.abs.v16i8(<16 x i8> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i8> %v +} + +declare <2 x i16> @llvm.vp.abs.v2i16(<2 x i16>, <2 x i1>, i32, i1 immarg) + +define <2 x i16> @vp_abs_v2i16(<2 x i16> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +define <2 x i16> @vp_abs_v2i16_unmasked(<2 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf4, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i16> @llvm.vp.abs.v2i16(<2 x i16> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i16> %v +} + +declare <4 x i16> @llvm.vp.abs.v4i16(<4 x i16>, <4 x i1>, i32, i1 immarg) + +define <4 x i16> @vp_abs_v4i16(<4 x i16> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +define <4 x i16> @vp_abs_v4i16_unmasked(<4 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e16, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i16> @llvm.vp.abs.v4i16(<4 x i16> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i16> %v +} + +declare <8 x i16> @llvm.vp.abs.v8i16(<8 x i16>, <8 x i1>, i32, i1 immarg) + +define <8 x i16> @vp_abs_v8i16(<8 x i16> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +define <8 x i16> @vp_abs_v8i16_unmasked(<8 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; 
CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i16> @llvm.vp.abs.v8i16(<8 x i16> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i16> %v +} + +declare <16 x i16> @llvm.vp.abs.v16i16(<16 x i16>, <16 x i1>, i32, i1 immarg) + +define <16 x i16> @vp_abs_v16i16(<16 x i16> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +define <16 x i16> @vp_abs_v16i16_unmasked(<16 x i16> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i16_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e16, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i16> @llvm.vp.abs.v16i16(<16 x i16> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i16> %v +} + +declare <2 x i32> @llvm.vp.abs.v2i32(<2 x i32>, <2 x i1>, i32, i1 immarg) + +define <2 x i32> @vp_abs_v2i32(<2 x i32> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +define <2 x i32> @vp_abs_v2i32_unmasked(<2 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i32> @llvm.vp.abs.v2i32(<2 x i32> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i32> %v +} + +declare <4 x i32> @llvm.vp.abs.v4i32(<4 x i32>, <4 x i1>, i32, i1 immarg) + +define <4 x i32> @vp_abs_v4i32(<4 x i32> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +define <4 x i32> @vp_abs_v4i32_unmasked(<4 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i32> @llvm.vp.abs.v4i32(<4 x i32> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i32> %v +} + +declare <8 x i32> @llvm.vp.abs.v8i32(<8 x 
i32>, <8 x i1>, i32, i1 immarg) + +define <8 x i32> @vp_abs_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +define <8 x i32> @vp_abs_v8i32_unmasked(<8 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i32> @llvm.vp.abs.v8i32(<8 x i32> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i32> %v +} + +declare <16 x i32> @llvm.vp.abs.v16i32(<16 x i32>, <16 x i1>, i32, i1 immarg) + +define <16 x i32> @vp_abs_v16i32(<16 x i32> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +define <16 x i32> @vp_abs_v16i32_unmasked(<16 x i32> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i32_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i32> @llvm.vp.abs.v16i32(<16 x i32> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i32> %v +} + +declare <2 x i64> @llvm.vp.abs.v2i64(<2 x i64>, <2 x i1>, i32, i1 immarg) + +define <2 x i64> @vp_abs_v2i64(<2 x i64> %va, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + +define <2 x i64> @vp_abs_v2i64_unmasked(<2 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v2i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf8, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vrsub.vi v9, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v9, v0.t +; CHECK-NEXT: ret + %head = insertelement <2 x i1> poison, i1 true, i32 0 + %m = shufflevector <2 x i1> %head, <2 x i1> poison, <2 x i32> zeroinitializer + %v = call <2 x i64> @llvm.vp.abs.v2i64(<2 x i64> %va, <2 x i1> %m, i32 %evl, i1 true) + ret <2 x i64> %v +} + +declare <4 x i64> @llvm.vp.abs.v4i64(<4 x i64>, <4 x i1>, i32, i1 immarg) + +define <4 x i64> @vp_abs_v4i64(<4 x i64> %va, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; 
CHECK-NEXT: ret + %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +define <4 x i64> @vp_abs_v4i64_unmasked(<4 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v4i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 4, e8, mf4, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m2, ta, ma +; CHECK-NEXT: vrsub.vi v10, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v10, v0.t +; CHECK-NEXT: ret + %head = insertelement <4 x i1> poison, i1 true, i32 0 + %m = shufflevector <4 x i1> %head, <4 x i1> poison, <4 x i32> zeroinitializer + %v = call <4 x i64> @llvm.vp.abs.v4i64(<4 x i64> %va, <4 x i1> %m, i32 %evl, i1 true) + ret <4 x i64> %v +} + +declare <8 x i64> @llvm.vp.abs.v8i64(<8 x i64>, <8 x i1>, i32, i1 immarg) + +define <8 x i64> @vp_abs_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +define <8 x i64> @vp_abs_v8i64_unmasked(<8 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v8i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vrsub.vi v12, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v12, v0.t +; CHECK-NEXT: ret + %head = insertelement <8 x i1> poison, i1 true, i32 0 + %m = shufflevector <8 x i1> %head, <8 x i1> poison, <8 x i32> zeroinitializer + %v = call <8 x i64> @llvm.vp.abs.v8i64(<8 x i64> %va, <8 x i1> %m, i32 %evl, i1 true) + ret <8 x i64> %v +} + +declare <15 x i64> @llvm.vp.abs.v15i64(<15 x i64>, <15 x i1>, i32, i1 immarg) + +define <15 x i64> @vp_abs_v15i64(<15 x i64> %va, <15 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v15i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +define <15 x i64> @vp_abs_v15i64_unmasked(<15 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v15i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %head = insertelement <15 x i1> poison, i1 true, i32 0 + %m = shufflevector <15 x i1> %head, <15 x i1> poison, <15 x i32> zeroinitializer + %v = call <15 x i64> @llvm.vp.abs.v15i64(<15 x i64> %va, <15 x i1> %m, i32 %evl, i1 true) + ret <15 x i64> %v +} + +declare <16 x i64> @llvm.vp.abs.v16i64(<16 x i64>, <16 x i1>, i32, i1 immarg) + +define <16 x i64> @vp_abs_v16i64(<16 x i64> %va, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +define <16 x i64> @vp_abs_v16i64_unmasked(<16 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v16i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v16, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v16, v0.t +; CHECK-NEXT: ret + %head = insertelement <16 x i1> poison, i1 true, i32 0 + %m = shufflevector <16 x i1> %head, <16 x i1> poison, <16 x i32> zeroinitializer + %v = call <16 x i64> @llvm.vp.abs.v16i64(<16 x i64> %va, <16 x i1> %m, i32 %evl, i1 true) + ret <16 x i64> %v +} + +declare <32 x i64> @llvm.vp.abs.v32i64(<32 x i64>, <32 x i1>, i32, i1 immarg) + +define <32 x i64> @vp_abs_v32i64(<32 x i64> %va, <32 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v32i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 2, e8, mf4, ta, ma +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vslidedown.vi v1, v0, 2 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB34_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: .LBB34_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v24, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v24, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmv1r.v v0, v1 +; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t +; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t +; CHECK-NEXT: ret + %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} + +define <32 x i64> @vp_abs_v32i64_unmasked(<32 x i64> %va, i32 zeroext %evl) { +; CHECK-LABEL: vp_abs_v32i64_unmasked: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 16, e8, m1, ta, ma +; CHECK-NEXT: li a2, 16 +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: mv a1, a0 +; CHECK-NEXT: bltu a0, a2, .LBB35_2 +; CHECK-NEXT: # %bb.1: +; CHECK-NEXT: li a1, 16 +; CHECK-NEXT: .LBB35_2: +; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v24, v8, 0, v0.t +; CHECK-NEXT: vmax.vv v8, v8, v24, v0.t +; CHECK-NEXT: addi a1, a0, -16 +; CHECK-NEXT: sltu a0, a0, a1 +; CHECK-NEXT: addi a0, a0, -1 +; CHECK-NEXT: and a0, a0, a1 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vrsub.vi v24, v16, 0, v0.t +; CHECK-NEXT: vmax.vv v16, v16, v24, v0.t +; CHECK-NEXT: ret + %head = insertelement <32 x i1> poison, i1 true, i32 0 + %m = shufflevector <32 x i1> %head, <32 x i1> poison, <32 x i32> zeroinitializer + %v = call <32 x i64> @llvm.vp.abs.v32i64(<32 x i64> %va, <32 x i1> %m, i32 %evl, i1 true) + ret <32 x i64> %v +} diff --git a/llvm/unittests/IR/VPIntrinsicTest.cpp b/llvm/unittests/IR/VPIntrinsicTest.cpp --- a/llvm/unittests/IR/VPIntrinsicTest.cpp +++ b/llvm/unittests/IR/VPIntrinsicTest.cpp @@ -146,6 +146,8 @@ Str << " declare <8 x i1> @llvm.vp.icmp.v8i16" << "(<8 x i16>, <8 x i16>, metadata, <8 x i1>, i32) "; + Str << " declare <8 x i16> @llvm.vp.abs.v8i16" + << "(<8 x i16>, <8 x i1>, i32, i1 immarg) "; Str << " declare <8 x i16> @llvm.vp.bitreverse.v8i16" << "(<8 x i16>, <8 x i1>, i32) "; Str << " declare <8 x i16> @llvm.vp.bswap.v8i16"