diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAG.cpp
@@ -9076,6 +9076,18 @@
   if (ConstantSDNode *CN = dyn_cast<ConstantSDNode>(N))
     return CN;
 
+  // SplatVectors can truncate their operands. Ignore that case here unless
+  // AllowTruncation is set.
+  if (N->getOpcode() == ISD::SPLAT_VECTOR) {
+    auto VecEltVT = N->getValueType(0).getVectorElementType();
+    if (auto *CN = dyn_cast<ConstantSDNode>(N->getOperand(0))) {
+      EVT CVT = CN->getValueType(0);
+      assert(CVT.bitsGE(VecEltVT) && "Illegal splat_vector element extension");
+      if (AllowTruncation || CVT.bitsEq(VecEltVT))
+        return CN;
+    }
+  }
+
   if (BuildVectorSDNode *BV = dyn_cast<BuildVectorSDNode>(N)) {
     BitVector UndefElements;
     ConstantSDNode *CN = BV->getConstantSplatNode(&UndefElements);
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVSDPatterns.td
@@ -40,6 +40,10 @@
 def riscv_m_vnot : PatFrag<(ops node:$in),
                            (xor node:$in, (splat_vector (XLenVT 1)))>;
 
+class SwapHelper<dag Prefix, dag A, dag B, dag Suffix, bit swap> {
+  dag Value = !con(Prefix, !if(swap, B, A), !if(swap, A, B), Suffix);
+}
+
 multiclass VPatUSLoadStoreSDNode
 
+multiclass VPatIntegerSetCCSDNode_VV<CondCode cc, string instruction_name,
+                                     bit swap = 0> {
+  foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#"_VV_"#vti.LMul.MX);
+    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+                               (vti.Vector vti.RegClass:$rs2), cc)),
+              SwapHelper<(instruction),
+                         (instruction vti.RegClass:$rs1),
+                         (instruction vti.RegClass:$rs2),
+                         (instruction VLMax, vti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatIntegerSetCCSDNode_XI<CondCode cc, string instruction_name,
+                                     string kind, ComplexPattern SplatPatKind,
+                                     DAGOperand xop_kind, bit swap = 0> {
+  foreach vti = AllIntegerVectors in {
+    defvar instruction = !cast<Instruction>(instruction_name#_#kind#_#vti.LMul.MX);
+    def : Pat<(vti.Mask (setcc (vti.Vector vti.RegClass:$rs1),
+                               (vti.Vector (SplatPatKind xop_kind:$rs2)), cc)),
+              SwapHelper<(instruction),
+                         (instruction vti.RegClass:$rs1),
+                         (instruction xop_kind:$rs2),
+                         (instruction VLMax, vti.SEW),
+                         swap>.Value>;
+  }
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX_VI<CondCode cc, string instruction_name,
+                                           bit swap = 0> {
+  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+                                   SplatPat_simm5, simm5, swap>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VV_VX<CondCode cc, string instruction_name,
+                                        bit swap = 0> {
+  defm : VPatIntegerSetCCSDNode_VV<cc, instruction_name, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR, swap>;
+}
+
+multiclass VPatIntegerSetCCSDNode_VX_VI<CondCode cc, string instruction_name,
+                                        bit swap = 0> {
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VX",
+                                   SplatPat, GPR, swap>;
+  defm : VPatIntegerSetCCSDNode_XI<cc, instruction_name, "VI",
+                                   SplatPat_simm5, simm5, swap>;
+}
+
 //===----------------------------------------------------------------------===//
 // Patterns.
 //===----------------------------------------------------------------------===//
@@ -169,6 +233,28 @@
 defm "" : VPatBinarySDNode_VV_VX_VI<srl, "PseudoVSRL", uimm5>;
 defm "" : VPatBinarySDNode_VV_VX_VI<sra, "PseudoVSRA", uimm5>;
 
+// 12.8. Vector Integer Comparison Instructions
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETEQ, "PseudoVMSEQ">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETNE, "PseudoVMSNE">;
+
+// FIXME: Support immediate forms of these by choosing SLE and decrementing
+// the immediate
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETLT, "PseudoVMSLT">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX<SETULT, "PseudoVMSLTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV<SETGT, "PseudoVMSLT", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGT, "PseudoVMSLTU", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETGT, "PseudoVMSGT">;
+defm "" : VPatIntegerSetCCSDNode_VX_VI<SETUGT, "PseudoVMSGTU">;
+
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETLE, "PseudoVMSLE">;
+defm "" : VPatIntegerSetCCSDNode_VV_VX_VI<SETULE, "PseudoVMSLEU">;
+
+// FIXME: Support immediate forms of these by choosing SGT and decrementing
+// the immediate
+defm "" : VPatIntegerSetCCSDNode_VV<SETGE, "PseudoVMSLE", /*swap*/1>;
+defm "" : VPatIntegerSetCCSDNode_VV<SETUGE, "PseudoVMSLEU", /*swap*/1>;
+
 // 12.9. Vector Integer Min/Max Instructions
 defm "" : VPatBinarySDNode_VV_VX<umin, "PseudoVMINU">;
 defm "" : VPatBinarySDNode_VV_VX<smin, "PseudoVMIN">;
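A quick illustration of the SwapHelper idiom above, since it is easy to misread: !con simply concatenates its dag arguments, and the swap bit decides which of A and B lands first, so one compare pseudo can cover both directions of a predicate. A minimal sketch of how the SETGT pattern expands at LMUL=1 (the pseudo name follows the instruction_name#"_VV_"#LMul.MX scheme; the concrete sew value is illustrative):

    // swap = 1, so the two register operands trade places:
    //   SwapHelper<(PseudoVMSLT_VV_M1),
    //              (PseudoVMSLT_VV_M1 $rs1),
    //              (PseudoVMSLT_VV_M1 $rs2),
    //              (PseudoVMSLT_VV_M1 VLMax, sew), 1>.Value
    // expands to
    //   (PseudoVMSLT_VV_M1 $rs2, $rs1, VLMax, sew)
    // i.e. "va > vb" is selected as "vb < va" on the same vmslt pseudo.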
diff --git a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
--- a/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
+++ b/llvm/test/CodeGen/AArch64/sve-intrinsics-int-compares-with-imm.ll
@@ -820,7 +820,7 @@
 
 define <vscale x 8 x i1> @ir_cmphi_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmphi_h
-; CHECK: cmphi p0.h, p0/z, z0.h, #0
+; CHECK: cmpne p0.h, p0/z, z0.h, #0
 ; CHECK-NEXT: ret
   %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -948,7 +948,7 @@
 
 define <vscale x 8 x i1> @ir_cmphs_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmphs_h
-; CHECK: cmphs p0.h, p0/z, z0.h, #0
+; CHECK: ptrue p0.h
 ; CHECK-NEXT: ret
   %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
@@ -1076,7 +1076,7 @@
 
 define <vscale x 8 x i1> @ir_cmplo_h(<vscale x 8 x i16> %a) {
 ; CHECK-LABEL: ir_cmplo_h
-; CHECK: cmplo p0.h, p0/z, z0.h, #0
+; CHECK: whilelo p0.h, xzr, xzr
 ; CHECK-NEXT: ret
   %elt = insertelement <vscale x 8 x i16> undef, i16 0, i32 0
   %splat = shufflevector <vscale x 8 x i16> %elt, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
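The new RISC-V test file below sweeps every integer predicate over vector-vector (_vv), vector-scalar (_vx) and vector-immediate (_vi) splat operands. The _vi_0 through _vi_4 variants probe the 5-bit signed immediate range of the RVV compare instructions: -16, -15, 0 and 15 fit in a simm5 and should fold into the .vi forms, while 16 is one past the range and is expected to be materialized in a scalar register and matched by the .vx pattern instead, e.g.:

    ; splat(15) is a legal simm5, so the immediate form is selected:
    ;   vmseq.vi v0, v16, 15
    ; splat(16) is out of range and goes through a GPR:
    ;   addi a0, zero, 16
    ;   vmseq.vx v0, v16, a0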
diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv32.ll
@@ -0,0 +1,3469 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
+
+define <vscale x 8 x i1> @icmp_eq_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_eq_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp eq <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_eq_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_eq_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_eq_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmseq.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp eq <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ne_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vv v0, v16, v17
+; CHECK-NEXT:    ret
+  %vc = icmp ne <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ne_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ne_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ne_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ne <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_ugt_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp ugt <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_ugt_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsne.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
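Note that icmp_ugt_vi_nxv8i8_0 above does not select vmsgtu.vi: once the splat constant is visible through isConstOrConstSplat, generic setcc simplification folds unsigned comparisons against zero before the RVV patterns are tried (presumably the same fold behind the cmphi-to-cmpne update in the AArch64 tests earlier in this patch):

    ; x >u 0   is  x != 0  ->  vmsne.vi v0, v16, 0
    ; x >=u 0  is  true    ->  vmset.m v0   (see icmp_uge_vi_nxv8i8_0 below)
    ; x <u 0   is  false   ->  vmclr.m v0   (see icmp_ult_vi_nxv8i8_0 below)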
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ugt_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_ugt_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ugt <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmsleu.vv v0, v17, v16
+; CHECK-NEXT:    ret
+  %vc = icmp uge <vscale x 8 x i8> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i8(<vscale x 8 x i8> %va, i8 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i8:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.x v25, a0
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 0, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_1(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -16
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_2(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, 15
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_3(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmv.v.i v25, -15
+; CHECK-NEXT:    vmsleu.vv v0, v25, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i8> undef, i8 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i8> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i8_4(<vscale x 8 x i8> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i8_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1,
zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + 
ret %vc +} + +define @icmp_ule_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; 
CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -16 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 15 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -15 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, 
%splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: 
addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_1( %va) { +; CHECK-LABEL: 
icmp_ne_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define 
@icmp_ult_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_4: +; CHECK: # 
%bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 15 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, 
v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_4( %va) { +; CHECK-LABEL: 
icmp_ne_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; 
CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; 
CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = 
icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat 
= shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) 
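+; At LMUL=8 the second nxv8i64 vector operand is passed indirectly, so it is
+; reloaded from the pointer in a0 by the vle64.v above; ugt itself has no
+; dedicated compare instruction and is lowered by swapping the operands of
+; vmsltu.vv.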
+; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsleu.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsltu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define 
@icmp_ult_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsleu.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, 
v8, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, 
zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -16 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, 15 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.i v8, -15 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a0 +; CHECK-NEXT: vmsle.vv v0, v8, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmslt.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define 
@icmp_slt_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a2, zero, e64,m8,ta,mu +; CHECK-NEXT: vmv.v.x v8, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v8, v8, a1 +; CHECK-NEXT: vmv.v.x v24, a0 +; CHECK-NEXT: vsll.vx v24, v24, a1 +; CHECK-NEXT: vsrl.vx v24, v24, a1 +; CHECK-NEXT: vor.vv v8, v24, v8 +; CHECK-NEXT: vmsle.vv v0, v16, v8 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + diff --git a/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/setcc-integer-rv64.ll @@ -0,0 
+1,3401 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s + +define @icmp_eq_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; 
CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i8( %va, %vb) { +; 
CHECK-LABEL: icmp_uge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -16 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 15 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -15 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsleu.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret 
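+; vmslt/vmsltu have no .vi immediate encoding, so the splatted -16 is first
+; materialized into a0 by addi and the compare is emitted as vmsltu.vx.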
+ %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, 
%splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v17, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -16 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = 
insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, 15 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.i v25, -15 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a0 +; CHECK-NEXT: vmsle.vv v0, v25, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = 
insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i8( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v17 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i8( %va, i8 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i8: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i8 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i8_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i8_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i8 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + 
%head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i16( %va, %vb) { +; 
CHECK-LABEL: icmp_ugt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, 
undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsleu.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 
+ %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; 
CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v18, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 0 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -16 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, 15 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.i v26, -15 +; CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a0 +; 
CHECK-NEXT: vmsle.vv v0, v26, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i16( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v18 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i16( %va, i16 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i16: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc 
+} + +define @icmp_sle_vi_nxv8i16_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i16 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i16_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i16_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i16 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmseq.vx v0, 
v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu 
+; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_uge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_uge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp uge %va, %vb + ret %vc +} + +define @icmp_uge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_uge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmset.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_uge_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_uge_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 
+; CHECK-NEXT: vmsleu.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp uge %va, %splat + ret %vc +} + +define @icmp_ult_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ult_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ult %va, %vb + ret %vc +} + +define @icmp_ult_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ult_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e8,m1,ta,mu +; CHECK-NEXT: vmclr.m v0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ult_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ult_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ult %va, %splat + ret %vc +} + +define @icmp_ule_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_ule_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp ule %va, %vb + ret %vc +} + +define @icmp_ule_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_ule_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc 
+} + +define @icmp_ule_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_ule_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_ule_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ule %va, %splat + ret %vc +} + +define @icmp_sgt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sgt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sgt %va, %vb + ret %vc +} + +define @icmp_sgt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sgt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sgt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sgt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; 
CHECK-NEXT: vmsgt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sgt %va, %splat + ret %vc +} + +define @icmp_sge_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sge_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v20, v16 +; CHECK-NEXT: ret + %vc = icmp sge %va, %vb + ret %vc +} + +define @icmp_sge_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sge_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -16 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, 15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.i v28, -15 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_sge_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sge_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vmsle.vv v0, v28, v16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sge %va, %splat + ret %vc +} + +define @icmp_slt_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_slt_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp slt %va, %vb + ret %vc +} + +define @icmp_slt_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_slt_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, zero +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + 
%splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, -15 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_slt_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_slt_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp slt %va, %splat + ret %vc +} + +define @icmp_sle_vv_nxv8i32( %va, %vb) { +; CHECK-LABEL: icmp_sle_vv_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v16, v20 +; CHECK-NEXT: ret + %vc = icmp sle %va, %vb + ret %vc +} + +define @icmp_sle_vx_nxv8i32( %va, i32 %b) { +; CHECK-LABEL: icmp_sle_vx_nxv8i32: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_0( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_1( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_2( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_sle_vi_nxv8i32_3( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i32 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc 
+} + +define @icmp_sle_vi_nxv8i32_4( %va) { +; CHECK-LABEL: icmp_sle_vi_nxv8i32_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i32 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp sle %va, %splat + ret %vc +} + +define @icmp_eq_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_eq_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmseq.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp eq %va, %vb + ret %vc +} + +define @icmp_eq_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_eq_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_eq_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_eq_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmseq.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp eq %va, %splat + ret %vc +} + +define @icmp_ne_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ne_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsne.vv v0, v16, v8 +; CHECK-NEXT: ret + %vc = icmp ne %va, %vb + ret %vc +} + +define @icmp_ne_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ne_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement 
undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ne_vi_nxv8i64_4( %va) { +; CHECK-LABEL: icmp_ne_vi_nxv8i64_4: +; CHECK: # %bb.0: +; CHECK-NEXT: addi a0, zero, 16 +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ne %va, %splat + ret %vc +} + +define @icmp_ugt_vv_nxv8i64( %va, %vb) { +; CHECK-LABEL: icmp_ugt_vv_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vle64.v v8, (a0) +; CHECK-NEXT: vmsltu.vv v0, v8, v16 +; CHECK-NEXT: ret + %vc = icmp ugt %va, %vb + ret %vc +} + +define @icmp_ugt_vx_nxv8i64( %va, i64 %b) { +; CHECK-LABEL: icmp_ugt_vx_nxv8i64: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a1, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vx v0, v16, a0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 %b, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_0( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_0: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsne.vi v0, v16, 0 +; CHECK-NEXT: ret + %head = insertelement undef, i64 0, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_1( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_1: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -16 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -16, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_2( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_2: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, 15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_3( %va) { +; CHECK-LABEL: icmp_ugt_vi_nxv8i64_3: +; CHECK: # %bb.0: +; CHECK-NEXT: vsetvli a0, zero, e64,m8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v16, -15 +; CHECK-NEXT: ret + %head = insertelement undef, i64 -15, i32 0 + %splat = shufflevector %head, undef, zeroinitializer + %vc = icmp ugt %va, %splat + ret %vc +} + +define @icmp_ugt_vi_nxv8i64_4( %va) { +; 
+
+define <vscale x 8 x i1> @icmp_uge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_uge_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = icmp uge <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_uge_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmset.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, -16
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 15
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, -15
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_uge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_uge_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsleu.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp uge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ult_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsltu.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = icmp ult <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ult_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e8,m1,ta,mu
+; CHECK-NEXT:    vmclr.m v0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 15
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ult_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ult_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ult <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_ule_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsleu.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = icmp ule <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_ule_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_ule_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_ule_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsleu.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp ule <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sgt_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmslt.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sgt <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sgt_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sgt_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sgt_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsgt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sgt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sge_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %vc = icmp sge <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sge_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 0
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, -16
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, 15
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.i v8, -15
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sge_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sge_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmv.v.x v8, a0
+; CHECK-NEXT:    vmsle.vv v0, v8, v16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sge <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_slt_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmslt.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = icmp slt <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_slt_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, zero
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 15
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, -15
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_slt_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_slt_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmslt.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp slt <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
+; CHECK-LABEL: icmp_sle_vv_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vle64.v v8, (a0)
+; CHECK-NEXT:    vmsle.vv v0, v16, v8
+; CHECK-NEXT:    ret
+  %vc = icmp sle <vscale x 8 x i64> %va, %vb
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
+; CHECK-LABEL: icmp_sle_vx_nxv8i64:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_0:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, 0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 0, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_1(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_1:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, -16
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_2(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_2:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, 15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_3(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_3:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    vsetvli a0, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vi v0, v16, -15
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 -15, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}
+
+define <vscale x 8 x i1> @icmp_sle_vi_nxv8i64_4(<vscale x 8 x i64> %va) {
+; CHECK-LABEL: icmp_sle_vi_nxv8i64_4:
+; CHECK:       # %bb.0:
+; CHECK-NEXT:    addi a0, zero, 16
+; CHECK-NEXT:    vsetvli a1, zero, e64,m8,ta,mu
+; CHECK-NEXT:    vmsle.vx v0, v16, a0
+; CHECK-NEXT:    ret
+  %head = insertelement <vscale x 8 x i64> undef, i64 16, i32 0
+  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
+  %vc = icmp sle <vscale x 8 x i64> %va, %splat
+  ret <vscale x 8 x i1> %vc
+}