diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.h b/llvm/lib/Target/RISCV/RISCVISelLowering.h
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.h
@@ -652,6 +652,7 @@
   SDValue lowerLogicVPOp(SDValue Op, SelectionDAG &DAG, unsigned MaskOpc,
                          unsigned VecOpc) const;
   SDValue lowerVPExtMaskOp(SDValue Op, SelectionDAG &DAG) const;
+  SDValue lowerVPSetCCMaskOp(SDValue Op, SelectionDAG &DAG) const;
   SDValue lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                              unsigned RISCVISDOpc) const;
   SDValue lowerFixedLengthVectorExtendToRVV(SDValue Op, SelectionDAG &DAG,
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -506,8 +506,9 @@
                          VT, Expand);
       }
 
-      setOperationAction({ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNC}, VT,
-                         Custom);
+      setOperationAction(
+          {ISD::VP_FPTOSI, ISD::VP_FPTOUI, ISD::VP_TRUNC, ISD::VP_SETCC}, VT,
+          Custom);
     }
 
     for (MVT VT : IntVecVTs) {
@@ -3497,6 +3498,8 @@
   case ISD::VP_UITOFP:
     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::UINT_TO_FP_VL);
   case ISD::VP_SETCC:
+    if (Op.getOperand(0).getSimpleValueType().getVectorElementType() == MVT::i1)
+      return lowerVPSetCCMaskOp(Op, DAG);
     return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
   }
 }
@@ -6099,6 +6102,85 @@
   return convertFromScalableVector(VT, Result, DAG, Subtarget);
 }
 
+SDValue RISCVTargetLowering::lowerVPSetCCMaskOp(SDValue Op,
+                                                SelectionDAG &DAG) const {
+  SDLoc DL(Op);
+  MVT VT = Op.getSimpleValueType();
+
+  SDValue Op1 = Op.getOperand(0);
+  SDValue Op2 = Op.getOperand(1);
+  ISD::CondCode Condition = cast<CondCodeSDNode>(Op.getOperand(2))->get();
+  // NOTE: Mask is dropped.
+  SDValue VL = Op.getOperand(4);
+
+  MVT ContainerVT = VT;
+  if (VT.isFixedLengthVector()) {
+    ContainerVT = getContainerForFixedLengthVector(VT);
+    Op1 = convertToScalableVector(ContainerVT, Op1, DAG, Subtarget);
+    Op2 = convertToScalableVector(ContainerVT, Op2, DAG, Subtarget);
+  }
+
+  SDValue Result;
+  SDValue AllOneMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
+
+  switch (Condition) {
+  default:
+    break;
+  // X != Y --> (X^Y)
+  case ISD::SETNE:
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
+    break;
+  // X == Y --> ~(X^Y)
+  case ISD::SETEQ: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, Op2, VL);
+    Result =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, AllOneMask, VL);
+    break;
+  }
+  // X >s Y --> X == 0 & Y == 1 --> ~X & Y
+  // X <u Y --> X == 0 & Y == 1 --> ~X & Y
+  case ISD::SETGT:
+  case ISD::SETULT: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Temp, Op2, VL);
+    break;
+  }
+  // X <s Y --> X == 1 & Y == 0 --> ~Y & X
+  // X >u Y --> X == 1 & Y == 0 --> ~Y & X
+  case ISD::SETLT:
+  case ISD::SETUGT: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMAND_VL, DL, ContainerVT, Op1, Temp, VL);
+    break;
+  }
+  // X >=s Y --> X == 0 | Y == 1 --> ~X | Y
+  // X <=u Y --> X == 0 | Y == 1 --> ~X | Y
+  case ISD::SETGE:
+  case ISD::SETULE: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op1, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op2, VL);
+    break;
+  }
+  // X <=s Y --> X == 1 | Y == 0 --> ~Y | X
+  // X >=u Y --> X == 1 | Y == 0 --> ~Y | X
+  case ISD::SETLE:
+  case ISD::SETUGE: {
+    SDValue Temp =
+        DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Op2, AllOneMask, VL);
+    Result = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Temp, Op1, VL);
+    break;
+  }
+  }
+
+  if (!VT.isFixedLengthVector())
+    return Result;
+  return convertFromScalableVector(VT, Result, DAG, Subtarget);
+}
+
 // Lower Floating-Point/Integer Type-Convert VP SDNodes
 SDValue RISCVTargetLowering::lowerVPFPIntConvOp(SDValue Op, SelectionDAG &DAG,
                                                 unsigned RISCVISDOpc) const {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-setcc-int-vp-mask.ll
@@ -0,0 +1,414 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+; RUN: llc -mtriple=riscv64 -mattr=+v -riscv-v-vector-bits-min=128 -verify-machineinstrs < %s \
+; RUN:   | FileCheck %s --check-prefixes=CHECK
+
+
+declare <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1>, <2 x i1>, metadata, <2 x i1>, i32)
+
+define <2 x i1> @icmp_eq_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_eq_vv_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"eq", <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %v
+}
+
+declare <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1>, <4 x i1>, metadata, <4 x i1>, i32)
+
+define <4 x i1> @icmp_eq_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_eq_vv_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"eq", <4 x i1> %m, i32 %evl)
+  ret <4 x i1> %v
+}
+
+declare <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1>, <8 x i1>, metadata, <8 x i1>, i32)
+
+define <8 x i1> @icmp_eq_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_eq_vv_v8i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"eq", <8 x i1> %m, i32 %evl)
+  ret <8 x i1> %v
+}
+
+declare <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1>, <16 x i1>, metadata, <16 x i1>, i32)
+
+define <16 x i1> @icmp_eq_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_eq_vv_v16i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
+; CHECK-NEXT:    vmxnor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"eq", <16 x i1> %m, i32 %evl)
+  ret <16 x i1> %v
+}
+
+define <2 x i1> @icmp_ne_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_ne_vv_v2i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
+; CHECK-NEXT:    vmxor.mm v0, v0, v8
+; CHECK-NEXT:    ret
+  %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ne", <2 x i1> %m, i32 %evl)
+  ret <2 x i1> %v
+}
+
+define <4 x i1> @icmp_ne_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_ne_vv_v4i1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
+;
CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ne", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_ne_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_ne_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ne", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_slt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"slt", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_slt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"slt", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_slt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_slt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"slt", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_ult_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ult", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_ult_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ult", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_ult_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <8 x i1> 
@llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_ult_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ult", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_sgt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sgt", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_sgt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sgt", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_sgt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_sgt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sgt", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_ugt_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ugt", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_ugt_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ugt", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_ugt_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_ugt_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ugt", <16 
x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_sle_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sle", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_sle_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sle", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_sle_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_sle_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sle", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_ule_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"ule", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_ule_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"ule", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_ule_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"ule", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_ule_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"ule", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_sge_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"sge", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> 
@icmp_sge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"sge", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_sge_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_sge_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"sge", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} + +define <2 x i1> @icmp_uge_vv_v2i1(<2 x i1> %va, <2 x i1> %vb, <2 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <2 x i1> @llvm.vp.icmp.v2i1(<2 x i1> %va, <2 x i1> %vb, metadata !"uge", <2 x i1> %m, i32 %evl) + ret <2 x i1> %v +} + +define <4 x i1> @icmp_uge_vv_v4i1(<4 x i1> %va, <4 x i1> %vb, <4 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <4 x i1> @llvm.vp.icmp.v4i1(<4 x i1> %va, <4 x i1> %vb, metadata !"uge", <4 x i1> %m, i32 %evl) + ret <4 x i1> %v +} + +define <8 x i1> @icmp_uge_vv_v8i1(<8 x i1> %va, <8 x i1> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i1(<8 x i1> %va, <8 x i1> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <16 x i1> @icmp_uge_vv_v16i1(<16 x i1> %va, <16 x i1> %vb, <16 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call <16 x i1> @llvm.vp.icmp.v16i1(<16 x i1> %va, <16 x i1> %vb, metadata !"uge", <16 x i1> %m, i32 %evl) + ret <16 x i1> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp-mask.ll @@ -0,0 +1,719 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK + +declare @llvm.vp.icmp.nxv1i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"eq", 
%m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv2i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv4i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv8i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv16i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv32i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv64i1(, , metadata, , i32) + +define @icmp_eq_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv16i1: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( 
%va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; 
CHECK-LABEL: icmp_sgt_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmandn.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm 
v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_ule_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ule_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"ule", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + 
+define @icmp_sge_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv32i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v0, v8 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv1i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv1i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv2i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv2i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv2i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv4i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv4i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv4i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv8i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv8i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv16i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv16i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m2, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv16i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv32i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv32i1: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a0, e8, m4, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv32i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv64i1( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv64i1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m8, ta, mu +; CHECK-NEXT: vmxnor.mm v0, v8, v0 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv64i1( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +}