diff --git a/llvm/include/llvm/CodeGen/SelectionDAG.h b/llvm/include/llvm/CodeGen/SelectionDAG.h
--- a/llvm/include/llvm/CodeGen/SelectionDAG.h
+++ b/llvm/include/llvm/CodeGen/SelectionDAG.h
@@ -1074,6 +1074,20 @@
     return getNode(ISD::SETCC, DL, VT, LHS, RHS, getCondCode(Cond));
   }
 
+  /// Helper function to make it easier to build VP_SETCCs if you just have an
+  /// ISD::CondCode instead of an SDValue.
+  SDValue getSetCCVP(const SDLoc &DL, EVT VT, SDValue LHS, SDValue RHS,
+                     ISD::CondCode Cond, SDValue Mask, SDValue EVL) {
+    assert(LHS.getValueType().isVector() == RHS.getValueType().isVector() &&
+           "Cannot compare scalars to vectors");
+    assert(LHS.getValueType().isVector() == VT.isVector() &&
+           "Cannot compare scalars to vectors");
+    assert(Cond != ISD::SETCC_INVALID &&
+           "Cannot create a setCC of an invalid node.");
+    return getNode(ISD::VP_SETCC, DL, VT, LHS, RHS, getCondCode(Cond), Mask,
+                   EVL);
+  }
+
   /// Helper function to make it easier to build Select's if you just have
   /// operands and don't want to check for vector.
   SDValue getSelect(const SDLoc &DL, EVT VT, SDValue Cond, SDValue LHS,
diff --git a/llvm/include/llvm/IR/VPIntrinsics.def b/llvm/include/llvm/IR/VPIntrinsics.def
--- a/llvm/include/llvm/IR/VPIntrinsics.def
+++ b/llvm/include/llvm/IR/VPIntrinsics.def
@@ -273,6 +273,10 @@
 VP_PROPERTY_CMP(2, false)
 END_REGISTER_VP(vp_icmp, VP_ICMP)
 
+// VP_SETCC (ISel only)
+BEGIN_REGISTER_VP_SDNODE(VP_SETCC, -1, vp_setcc, 3, 4)
+END_REGISTER_VP_SDNODE(VP_SETCC)
+
 ///// } Comparisons
 
 ///// Memory Operations {
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp
@@ -1033,14 +1033,18 @@
   case ISD::STRICT_FSETCC:
   case ISD::STRICT_FSETCCS:
   case ISD::SETCC:
+  case ISD::VP_SETCC:
   case ISD::BR_CC: {
-    unsigned CCOperand = Node->getOpcode() == ISD::SELECT_CC ? 4 :
-                         Node->getOpcode() == ISD::STRICT_FSETCC ? 3 :
-                         Node->getOpcode() == ISD::STRICT_FSETCCS ? 3 :
-                         Node->getOpcode() == ISD::SETCC ? 2 : 1;
-    unsigned CompareOperand = Node->getOpcode() == ISD::BR_CC ? 2 :
-                              Node->getOpcode() == ISD::STRICT_FSETCC ? 1 :
-                              Node->getOpcode() == ISD::STRICT_FSETCCS ? 1 : 0;
+    unsigned Opc = Node->getOpcode();
+    unsigned CCOperand = Opc == ISD::SELECT_CC                         ? 4
+                         : Opc == ISD::STRICT_FSETCC                   ? 3
+                         : Opc == ISD::STRICT_FSETCCS                  ? 3
+                         : (Opc == ISD::SETCC || Opc == ISD::VP_SETCC) ? 2
+                                                                       : 1;
+    unsigned CompareOperand = Opc == ISD::BR_CC            ? 2
+                              : Opc == ISD::STRICT_FSETCC  ? 1
+                              : Opc == ISD::STRICT_FSETCCS ? 1
+                                                           : 0;
     MVT OpVT = Node->getOperand(CompareOperand).getSimpleValueType();
     ISD::CondCode CCCode =
         cast<CondCodeSDNode>(Node->getOperand(CCOperand))->get();
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.h
@@ -574,6 +574,7 @@
                    SmallVectorImpl<SDValue> &OpValues);
   void visitVPStridedStore(const VPIntrinsic &VPIntrin,
                            SmallVectorImpl<SDValue> &OpValues);
+  void visitVPCmp(const VPCmpIntrinsic &VPIntrin);
   void visitVectorPredicationIntrinsic(const VPIntrinsic &VPIntrin);
 
   void visitVAStart(const CallInst &I);
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -7313,7 +7313,12 @@
 static unsigned getISDForVPIntrinsic(const VPIntrinsic &VPIntrin) {
   Optional<unsigned> ResOPC;
-  switch (VPIntrin.getIntrinsicID()) {
+  auto IID = VPIntrin.getIntrinsicID();
+  // vp.fcmp and vp.icmp are handled specially.
+  if (IID == Intrinsic::vp_fcmp || IID == Intrinsic::vp_icmp)
+    return ISD::VP_SETCC;
+
+  switch (IID) {
 #define BEGIN_REGISTER_VP_INTRINSIC(VPID, ...) case Intrinsic::VPID:
 #define BEGIN_REGISTER_VP_SDNODE(VPSD, ...) ResOPC = ISD::VPSD;
 #define END_REGISTER_VP_INTRINSIC(VPID) break;
@@ -7496,18 +7501,55 @@
   setValue(&VPIntrin, ST);
 }
 
+void SelectionDAGBuilder::visitVPCmp(const VPCmpIntrinsic &VPIntrin) {
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  SDLoc DL = getCurSDLoc();
+
+  ISD::CondCode Condition;
+  CmpInst::Predicate CondCode = VPIntrin.getPredicate();
+  bool IsFP = VPIntrin.getOperand(0)->getType()->isFPOrFPVectorTy();
+  if (IsFP) {
+    Condition = getFCmpCondCode(CondCode);
+    auto *FPMO = dyn_cast<FPMathOperator>(&VPIntrin);
+    if ((FPMO && FPMO->hasNoNaNs()) || TM.Options.NoNaNsFPMath)
+      Condition = getFCmpCodeWithoutNaN(Condition);
+
+  } else {
+    Condition = getICmpCondCode(CondCode);
+  }
+
+  SDValue Op1 = getValue(VPIntrin.getOperand(0));
+  SDValue Op2 = getValue(VPIntrin.getOperand(1));
+  // #2 is the condition code
+  SDValue MaskOp = getValue(VPIntrin.getOperand(3));
+  SDValue EVL = getValue(VPIntrin.getOperand(4));
+  MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
+  assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
+         "Unexpected target EVL type");
+  EVL = DAG.getNode(ISD::ZERO_EXTEND, DL, EVLParamVT, EVL);
+
+  EVT DestVT = DAG.getTargetLoweringInfo().getValueType(DAG.getDataLayout(),
+                                                        VPIntrin.getType());
+  setValue(&VPIntrin,
+           DAG.getSetCCVP(DL, DestVT, Op1, Op2, Condition, MaskOp, EVL));
+}
+
 void SelectionDAGBuilder::visitVectorPredicationIntrinsic(
     const VPIntrinsic &VPIntrin) {
   SDLoc DL = getCurSDLoc();
   unsigned Opcode = getISDForVPIntrinsic(VPIntrin);
+  auto IID = VPIntrin.getIntrinsicID();
+
+  if (const auto *CmpI = dyn_cast<VPCmpIntrinsic>(&VPIntrin))
+    return visitVPCmp(*CmpI);
+
   SmallVector<EVT, 4> ValueVTs;
   const TargetLowering &TLI = DAG.getTargetLoweringInfo();
   ComputeValueVTs(TLI, DAG.getDataLayout(), VPIntrin.getType(), ValueVTs);
   SDVTList VTs = DAG.getVTList(ValueVTs);
 
-  auto EVLParamPos =
-      VPIntrinsic::getVectorLengthParamPos(VPIntrin.getIntrinsicID());
+  auto EVLParamPos = VPIntrinsic::getVectorLengthParamPos(IID);
 
   MVT EVLParamVT = TLI.getVPExplicitVectorLengthTy();
   assert(EVLParamVT.isScalarInteger() && EVLParamVT.bitsGE(MVT::i32) &&
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -490,14 +490,17 @@
       ISD::VP_SHL,         ISD::VP_REDUCE_ADD,  ISD::VP_REDUCE_AND,
       ISD::VP_REDUCE_OR,   ISD::VP_REDUCE_XOR,  ISD::VP_REDUCE_SMAX,
       ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN,
-      ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI};
+      ISD::VP_MERGE,       ISD::VP_SELECT,      ISD::VP_FPTOSI,
+      ISD::VP_SETCC};
 
   static const unsigned FloatingPointVPOps[] = {
-      ISD::VP_FADD,        ISD::VP_FSUB,        ISD::VP_FMUL,
-      ISD::VP_FDIV,        ISD::VP_FNEG,        ISD::VP_FMA,
-      ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD, ISD::VP_REDUCE_FMIN,
-      ISD::VP_REDUCE_FMAX, ISD::VP_MERGE,       ISD::VP_SELECT,
-      ISD::VP_SITOFP};
+      ISD::VP_FADD,        ISD::VP_FSUB,
+      ISD::VP_FMUL,        ISD::VP_FDIV,
+      ISD::VP_FNEG,        ISD::VP_FMA,
+      ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
+      ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX,
+      ISD::VP_MERGE,       ISD::VP_SELECT,
+      ISD::VP_SITOFP,      ISD::VP_SETCC};
 
   if (!Subtarget.is64Bit()) {
     // We must custom-lower certain vXi64 operations on RV32 due to the vector
@@ -849,6 +852,7 @@
         setOperationAction(ISD::XOR, VT, Custom);
 
         setOperationAction(ISD::VP_FPTOSI, VT, Custom);
+        setOperationAction(ISD::VP_SETCC, VT, Custom);
 
         continue;
       }
@@ -3688,6 +3692,8 @@
     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::FP_TO_SINT_VL);
   case ISD::VP_SITOFP:
     return lowerVPFPIntConvOp(Op, DAG, RISCVISD::SINT_TO_FP_VL);
+  case ISD::VP_SETCC:
+    return lowerVPOp(Op, DAG, RISCVISD::SETCC_VL);
   }
 }
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVVLPatterns.td
@@ -458,6 +458,14 @@
             (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                  vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+                                      vti.RegClass:$rs2, cc,
+                                      (vti.Mask V0),
+                                      VLOpFrag)),
+            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
+                 (vti.Mask (IMPLICIT_DEF)),
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask V0),
+                 GPR:$vl, vti.Log2SEW)>;
 }
 
 // Inherits from VPatIntegerSetCCVL_VV and adds a pattern with operands swapped.
@@ -471,47 +479,84 @@
             (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX)
                  vti.RegClass:$rs1, vti.RegClass:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs2),
+                                      vti.RegClass:$rs1, invcc,
+                                      (vti.Mask V0),
+                                      VLOpFrag)),
+            (!cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX#"_MASK")
+                 (vti.Mask (IMPLICIT_DEF)),
+                 vti.RegClass:$rs1, vti.RegClass:$rs2, (vti.Mask V0),
+                 GPR:$vl, vti.Log2SEW)>;
 }
 
 multiclass VPatIntegerSetCCVL_VX_Swappable<VTypeInfo vti, string instruction_name,
                                            CondCode cc, CondCode invcc> {
   defvar instruction = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX);
+  defvar mask_instruction = !cast<Instruction>(instruction_name#"_VX_"#vti.LMul.MX#"_MASK");
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       (SplatPat (XLenVT GPR:$rs2)), cc,
                                       (vti.Mask true_mask), VLOpFrag)),
             (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+                                      (SplatPat (XLenVT GPR:$rs2)), cc,
+                                      (vti.Mask V0), VLOpFrag)),
+            (mask_instruction (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
+                              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
                                       (vti.Vector vti.RegClass:$rs1), invcc,
                                       (vti.Mask true_mask), VLOpFrag)),
             (instruction vti.RegClass:$rs1, GPR:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat (XLenVT GPR:$rs2)),
+                                      (vti.Vector vti.RegClass:$rs1), invcc,
+                                      (vti.Mask V0), VLOpFrag)),
+            (mask_instruction (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, GPR:$rs2,
+                              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 }
 
 multiclass VPatIntegerSetCCVL_VI_Swappable<VTypeInfo vti, string instruction_name,
                                            CondCode cc, CondCode invcc> {
   defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
+  defvar mask_instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       (SplatPat_simm5 simm5:$rs2), cc,
                                       (vti.Mask true_mask), VLOpFrag)),
             (instruction vti.RegClass:$rs1, XLenVT:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+                                      (SplatPat_simm5 simm5:$rs2), cc,
+                                      (vti.Mask V0), VLOpFrag)),
+            (mask_instruction (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, XLenVT:$rs2,
+                              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
                                       (vti.Vector vti.RegClass:$rs1), invcc,
                                       (vti.Mask true_mask), VLOpFrag)),
             (instruction vti.RegClass:$rs1, simm5:$rs2, GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (SplatPat_simm5 simm5:$rs2),
+                                      (vti.Vector vti.RegClass:$rs1), invcc,
+                                      (vti.Mask V0), VLOpFrag)),
+            (mask_instruction (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1, simm5:$rs2,
+                              (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
 }
 
 multiclass VPatIntegerSetCCVL_VIPlus1<VTypeInfo vti, string instruction_name,
                                       CondCode cc, ComplexPattern splatpat_kind> {
   defvar instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX);
+  defvar mask_instruction = !cast<Instruction>(instruction_name#"_VI_"#vti.LMul.MX#"_MASK");
   def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
                                       (splatpat_kind simm5:$rs2), cc,
                                       (vti.Mask true_mask), VLOpFrag)),
             (instruction vti.RegClass:$rs1, (DecImm simm5:$rs2), GPR:$vl, vti.Log2SEW)>;
+  def : Pat<(vti.Mask (riscv_setcc_vl (vti.Vector vti.RegClass:$rs1),
+                                      (splatpat_kind simm5:$rs2), cc,
+                                      (vti.Mask V0), VLOpFrag)),
+            (mask_instruction (vti.Mask (IMPLICIT_DEF)), vti.RegClass:$rs1,
+                              (DecImm simm5:$rs2), (vti.Mask V0),
+                              GPR:$vl, vti.Log2SEW)>;
 }
 
 multiclass VPatFPSetCCVL_VV_VF_FV

+declare <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8>, <8 x i8>, metadata, <8 x i1>, i32)
+
+define <8 x i1> @icmp_eq_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) {
+; CHECK-LABEL: icmp_eq_vv_v8i8:
+; CHECK: # %bb.0:
+;
CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmseq.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsne.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + 
%vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 
+ %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsleu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x 
i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmslt.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> 
zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmslt.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> 
@llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vv_v8i8(<8 x i8> %va, <8 x i8> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_swap_v8i8(<8 x i8> %va, i8 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 %b, i32 0 + %vb = shufflevector <8 x i8> 
%elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %va, <8 x i8> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_swap_v8i8(<8 x i8> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_v8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e8, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v9, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i8> poison, i8 4, i32 0 + %vb = shufflevector <8 x i8> %elt.head, <8 x i8> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i8(<8 x i8> %vb, <8 x i8> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +declare <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32>, <8 x i32>, metadata, <8 x i1>, i32) + +define <8 x i1> @icmp_eq_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmseq.vv v12, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmseq.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmseq.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> 
@icmp_eq_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmseq.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsne.vv v12, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsne.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsne.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsne.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsltu.vv v12, v10, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> 
@llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgtu.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsltu.vx v10, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsleu.vv v12, v10, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsleu.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_swap_v8i32(<8 x i32> %va, i32 
%b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsleu.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgtu.vi v10, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsleu.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsltu.vv v12, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsltu.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsgtu.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsleu.vi v10, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 
4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgtu.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmslt.vv v12, v10, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgt.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmslt.vx v10, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> 
@icmp_sge_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vv v12, v10, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgt.vi v10, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmslt.vv v12, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmslt.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = 
shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsgt.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vi v10, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsgt.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vv_v8i32(<8 x i32> %va, <8 x i32> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vv v12, v8, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vx v10, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_swap_v8i32(<8 x i32> %va, i32 %b, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 %b, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + 
ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vi v10, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %va, <8 x i32> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_swap_v8i32(<8 x i32> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_v8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e32, m2, ta, mu +; CHECK-NEXT: vmv.v.i v12, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m2, ta, ma +; CHECK-NEXT: vmsle.vv v10, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i32> poison, i32 4, i32 0 + %vb = shufflevector <8 x i32> %elt.head, <8 x i32> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i32(<8 x i32> %vb, <8 x i32> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +declare <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64>, <8 x i64>, metadata, <8 x i1>, i32) + +define <8 x i1> @icmp_eq_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmseq.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmseq.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmseq.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmseq.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmseq.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> 
poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmseq.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_eq_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmseq.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"eq", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsne.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsne.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsne.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsne.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsne.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; 
RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsne.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ne_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsne.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ne", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsltu.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgtu.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: 
vmsltu.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ugt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsltu.vx v12, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ugt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsleu.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsleu.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmv.v.x v16, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsleu.vv v12, v16, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsleu.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, 
v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsleu.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_uge_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsleu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"uge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsltu.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsltu.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsltu.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, 
ma +; RV32-NEXT: vmsltu.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgtu.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsleu.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_ult_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"ult", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmslt.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgt.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: 
vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmslt.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sgt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmslt.vx v12, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sgt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmv.v.x v16, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsle.vv v12, v16, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: 
.cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsle.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sge_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sge", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmslt.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmslt.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmslt.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_swap_v8i64: 
+; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmslt.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsgt.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_slt_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"slt", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vv_v8i64(<8 x i64> %va, <8 x i64> %vb, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_v8i64(<8 x i64> %va, i64 %b, <8 x i1> %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsle.vv v12, v8, v16, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsle.vx v12, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vx_swap_v8i64(<8 x i64> %va, i64 %b, <8 x i1> 
%m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_swap_v8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v16, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m4, ta, ma +; RV32-NEXT: vmsle.vv v12, v16, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v12 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_swap_v8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; RV64-NEXT: vmv.v.x v16, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m4, ta, ma +; RV64-NEXT: vmsle.vv v12, v16, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v12 +; RV64-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 %b, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %va, <8 x i64> %vb, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} + +define <8 x i1> @icmp_sle_vi_swap_v8i64(<8 x i64> %va, <8 x i1> %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_v8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetivli zero, 8, e64, m4, ta, mu +; CHECK-NEXT: vmv.v.i v16, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m4, ta, ma +; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement <8 x i64> poison, i64 4, i32 0 + %vb = shufflevector <8 x i64> %elt.head, <8 x i64> poison, <8 x i32> zeroinitializer + %v = call <8 x i1> @llvm.vp.icmp.v8i64(<8 x i64> %vb, <8 x i64> %va, metadata !"sle", <8 x i1> %m, i32 %evl) + ret <8 x i1> %v +} diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-int-vp.ll @@ -0,0 +1,3824 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+v -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK,RV32 +; RUN: llc -mtriple=riscv64 -mattr=+v -verify-machineinstrs < %s \ +; RUN: | FileCheck %s --check-prefixes=CHECK,RV64 + +; FIXME: We're missing canonicalizations of ISD::VP_SETCC equivalent to those +; for ISD::SETCC, e.g., splats aren't moved to the RHS. 
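+; As a sketch of the missing combine (illustrative DAG notation only, with
+; hypothetical operand names; nothing below in this comment is checked by the
+; generated assertions):
+;   vp_setcc (splat_vector X), V, setult, Mask, EVL
+; could be rewritten, by swapping the operands and the condition code, into
+;   vp_setcc V, (splat_vector X), setugt, Mask, EVL
+; mirroring what is already done for ISD::SETCC and letting the splat fold
+; into a .vx/.vi instruction form. The *_swap tests below exercise this
+; pattern.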
+ +declare @llvm.vp.icmp.nxv1i8(, , metadata, , i32) + +define @icmp_eq_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmseq.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsne.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call 
@llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; 
CHECK-NEXT: vmsleu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmslt.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_nxv1i8: +; CHECK: # %bb.0: +; 
CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = 
shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmslt.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv1i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv1i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, mf8, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, 
mf8, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv1i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv1i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, mf8, ta, mu +; CHECK-NEXT: vmv.v.i v9, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, mf8, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i8( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv8i8(, , metadata, , i32) + +define @icmp_eq_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmseq.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsne.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + 
+define @icmp_ne_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call 
@llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsleu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv8i8: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmslt.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, 
%va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmslt.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv8i8( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = 
call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv8i8( %va, i8 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e8, m1, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv8i8( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8, m1, ta, mu +; CHECK-NEXT: vmv.v.i v9, 4 +; CHECK-NEXT: vsetvli zero, a0, e8, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i8 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i8( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv1i32(, , metadata, , i32) + +define @icmp_eq_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmseq.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmseq.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata 
!"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsne.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsne.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector 
%elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsleu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
icmp_ult_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmslt.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata 
!"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmslt.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsgt.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv1i32: +; 
CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv1i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vx v0, v8, a0, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv1i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e32, mf2, ta, mu +; CHECK-NEXT: vmv.v.x v9, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv1i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv1i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, mf2, ta, mu +; CHECK-NEXT: vmv.v.i v9, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, mf2, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i32( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv8i32(, , metadata, , i32) + +define @icmp_eq_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmseq.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmseq.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, 
ta, ma +; CHECK-NEXT: vmseq.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmseq.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmseq.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsne.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsne.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsne.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsne.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsne.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; 
CHECK-NEXT: vmsltu.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsgtu.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsltu.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsltu.vx v12, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsleu.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsleu.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsleu.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: 
vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsleu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsltu.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsltu.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsgtu.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsleu.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgtu.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmslt.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsgt.vx v12, v8, a0, v0.t +; 
CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmslt.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmslt.vx v12, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vv v16, v12, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e32, m4, ta, mu +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
icmp_sge_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmslt.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmslt.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsgt.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsgt.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv8i32( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vv v16, v8, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vx v12, v8, a0, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv8i32( %va, i32 %b, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vx_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, 
zero, e32, m4, ta, mu +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vi v12, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv8i32( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32, m4, ta, mu +; CHECK-NEXT: vmv.v.i v16, 4 +; CHECK-NEXT: vsetvli zero, a0, e32, m4, ta, ma +; CHECK-NEXT: vmsle.vv v12, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i32 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i32( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv1i64(, , metadata, , i32) + +define @icmp_eq_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmseq.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmseq.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmseq.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmseq.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmseq.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
icmp_eq_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmseq.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsne.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsne.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsne.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsne.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsne.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsne.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer 
+ %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsltu.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsltu.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsleu.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; 
RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsleu.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsleu.vv v0, v9, v8, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsleu.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsleu.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsltu.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsltu.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsltu.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call 
@llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsltu.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgtu.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsleu.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgtu.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmslt.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgt.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: 
vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmslt.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmslt.vx v0, v8, a1, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsle.vv v0, v9, v8, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsle.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 
+ %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmslt.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmslt.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmslt.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmslt.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsgt.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 3, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsgt.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv1i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: 
icmp_sle_vv_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v8, v9, v0.t +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsle.vv v0, v8, v9, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsle.vx v0, v8, a0, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv1i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_swap_nxv1i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m1, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v9, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m1, ta, ma +; RV32-NEXT: vmsle.vv v0, v9, v8, v0.t +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_swap_nxv1i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m1, ta, mu +; RV64-NEXT: vmv.v.x v9, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m1, ta, ma +; RV64-NEXT: vmsle.vv v0, v9, v8, v0.t +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vi v0, v8, 4, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv1i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv1i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m1, ta, mu +; CHECK-NEXT: vmv.v.i v9, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m1, ta, ma +; CHECK-NEXT: vmsle.vv v0, v9, v8, v0.t +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv1i64( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +declare @llvm.vp.icmp.nxv8i64(, , metadata, , i32) + +define @icmp_eq_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmseq.vv v24, v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 
16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmseq.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmseq.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_eq_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmseq.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_eq_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmseq.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmseq.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_eq_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_eq_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmseq.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"eq", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsne.vv v24, v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsne.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; 
RV64-NEXT: vmsne.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ne_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsne.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ne_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsne.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsne.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ne_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ne_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsne.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ne", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsltu.vv v24, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsgtu.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ugt_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 
+; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ugt_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsltu.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgtu.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_ugt_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ugt_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsltu.vx v16, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ugt", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsleu.vv v24, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsleu.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vmv.v.x v24, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsleu.vv v16, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_uge_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsleu.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, 
v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_uge_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsleu.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgtu.vi v16, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_uge_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_uge_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsleu.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"uge", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsltu.vv v24, v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsltu.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsltu.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_ult_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsltu.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_ult_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsgtu.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ult", %m, i32 
%evl) + ret %v +} + +define @icmp_ult_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsleu.vi v16, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_ult_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_ult_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgtu.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"ult", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmslt.vv v24, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsgt.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sgt_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sgt_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmslt.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgt.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata 
!"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sgt_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sgt_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: li a1, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmslt.vx v16, v8, a1, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sgt", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vv v24, v16, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vmv.v.x v24, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsle.vv v16, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sge_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sge_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsle.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgt.vi v16, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_sge_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sge_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement 
poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sge", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmslt.vv v24, v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmslt.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmslt.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_slt_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmslt.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_slt_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsgt.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vi v16, v8, 3, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_slt_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_slt_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsgt.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"slt", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vv_nxv8i64( %va, %vb, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vv v24, v8, v16, v0.t +; CHECK-NEXT: vmv1r.v v0, v24 +; CHECK-NEXT: ret + %v = 
call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsle.vv v16, v8, v24, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsle.vx v16, v8, a0, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vx_swap_nxv8i64( %va, i64 %b, %m, i32 zeroext %evl) { +; RV32-LABEL: icmp_sle_vx_swap_nxv8i64: +; RV32: # %bb.0: +; RV32-NEXT: addi sp, sp, -16 +; RV32-NEXT: .cfi_def_cfa_offset 16 +; RV32-NEXT: sw a1, 12(sp) +; RV32-NEXT: sw a0, 8(sp) +; RV32-NEXT: vsetvli a0, zero, e64, m8, ta, mu +; RV32-NEXT: addi a0, sp, 8 +; RV32-NEXT: vlse64.v v24, (a0), zero +; RV32-NEXT: vsetvli zero, a2, e64, m8, ta, ma +; RV32-NEXT: vmsle.vv v16, v24, v8, v0.t +; RV32-NEXT: vmv1r.v v0, v16 +; RV32-NEXT: addi sp, sp, 16 +; RV32-NEXT: ret +; +; RV64-LABEL: icmp_sle_vx_swap_nxv8i64: +; RV64: # %bb.0: +; RV64-NEXT: vsetvli a2, zero, e64, m8, ta, mu +; RV64-NEXT: vmv.v.x v24, a0 +; RV64-NEXT: vsetvli zero, a1, e64, m8, ta, ma +; RV64-NEXT: vmsle.vv v16, v24, v8, v0.t +; RV64-NEXT: vmv1r.v v0, v16 +; RV64-NEXT: ret + %elt.head = insertelement poison, i64 %b, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vi v16, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %va, %vb, metadata !"sle", %m, i32 %evl) + ret %v +} + +define @icmp_sle_vi_swap_nxv8i64( %va, %m, i32 zeroext %evl) { +; CHECK-LABEL: icmp_sle_vi_swap_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64, m8, ta, mu +; CHECK-NEXT: vmv.v.i v24, 4 +; CHECK-NEXT: vsetvli zero, a0, e64, m8, ta, ma +; CHECK-NEXT: vmsle.vv v16, v24, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v16 +; CHECK-NEXT: ret + %elt.head = insertelement poison, i64 4, i32 0 + %vb = shufflevector %elt.head, poison, zeroinitializer + %v = call @llvm.vp.icmp.nxv8i64( %vb, %va, metadata !"sle", %m, i32 %evl) + ret %v +}