diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -989,6 +989,8 @@
   defm vmsle : RISCVCompare;
   defm vmsgtu : RISCVCompare;
   defm vmsgt : RISCVCompare;
+  defm vmsgeu : RISCVCompare;
+  defm vmsge : RISCVCompare;
 
   defm vminu : RISCVBinaryAAX;
   defm vmin : RISCVBinaryAAX;
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -431,6 +431,188 @@
     break;
   }
+  case ISD::INTRINSIC_WO_CHAIN: {
+    unsigned IntNo = Node->getConstantOperandVal(0);
+    switch (IntNo) {
+    // By default we do not custom select any intrinsic.
+    default:
+      break;
+    case Intrinsic::riscv_vmsgeu:
+    case Intrinsic::riscv_vmsge: {
+      SDValue Src1 = Node->getOperand(1);
+      SDValue Src2 = Node->getOperand(2);
+      // Only custom select scalar second operand.
+      if (Src2.getValueType() != XLenVT)
+        break;
+      // Small constants are handled with patterns.
+      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
+        int64_t CVal = C->getSExtValue();
+        if (CVal >= -15 && CVal <= 16)
+          break;
+      }
+      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu;
+      MVT Src1VT = Src1.getSimpleValueType();
+      unsigned VMSLTOpcode, VMNANDOpcode;
+      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
+      default:
+        llvm_unreachable("Unexpected LMUL!");
+      case RISCVVLMUL::LMUL_F8:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8;
+        break;
+      case RISCVVLMUL::LMUL_F4:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4;
+        break;
+      case RISCVVLMUL::LMUL_F2:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2;
+        break;
+      case RISCVVLMUL::LMUL_1:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1;
+        break;
+      case RISCVVLMUL::LMUL_2:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2;
+        break;
+      case RISCVVLMUL::LMUL_4:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4;
+        break;
+      case RISCVVLMUL::LMUL_8:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
+        VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8;
+        break;
+      }
+      SDValue SEW =
+          CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
+      SDValue VL;
+      selectVLOp(Node->getOperand(3), VL);
+
+      // Expand to
+      // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd
+      SDValue Cmp = SDValue(
+          CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
+          0);
+      ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT,
+                                               {Cmp, Cmp, VL, SEW}));
+      return;
+    }
+    case Intrinsic::riscv_vmsgeu_mask:
+    case Intrinsic::riscv_vmsge_mask: {
+      SDValue Src1 = Node->getOperand(2);
+      SDValue Src2 = Node->getOperand(3);
+      // Only custom select scalar second operand.
+      if (Src2.getValueType() != XLenVT)
+        break;
+      // Small constants are handled with patterns.
+      if (auto *C = dyn_cast<ConstantSDNode>(Src2)) {
+        int64_t CVal = C->getSExtValue();
+        if (CVal >= -15 && CVal <= 16)
+          break;
+      }
+      bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask;
+      MVT Src1VT = Src1.getSimpleValueType();
+      unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode;
+      switch (RISCVTargetLowering::getLMUL(Src1VT)) {
+      default:
+        llvm_unreachable("Unexpected LMUL!");
+      case RISCVVLMUL::LMUL_F8:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK
+                                     : RISCV::PseudoVMSLT_VX_MF8_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8;
+        break;
+      case RISCVVLMUL::LMUL_F4:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK
+                                     : RISCV::PseudoVMSLT_VX_MF4_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4;
+        break;
+      case RISCVVLMUL::LMUL_F2:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK
+                                     : RISCV::PseudoVMSLT_VX_MF2_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2;
+        break;
+      case RISCVVLMUL::LMUL_1:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK
+                                     : RISCV::PseudoVMSLT_VX_M1_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M1;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1;
+        break;
+      case RISCVVLMUL::LMUL_2:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK
+                                     : RISCV::PseudoVMSLT_VX_M2_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M2;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2;
+        break;
+      case RISCVVLMUL::LMUL_4:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK
+                                     : RISCV::PseudoVMSLT_VX_M4_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M4;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4;
+        break;
+      case RISCVVLMUL::LMUL_8:
+        VMSLTOpcode =
+            IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8;
+        VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK
+                                     : RISCV::PseudoVMSLT_VX_M8_MASK;
+        VMXOROpcode = RISCV::PseudoVMXOR_MM_M8;
+        VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8;
+        break;
+      }
+      SDValue SEW =
+          CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT);
+      SDValue VL;
+      selectVLOp(Node->getOperand(5), VL);
+      SDValue MaskedOff = Node->getOperand(1);
+      SDValue Mask = Node->getOperand(4);
+      // If the MaskedOff value and the Mask are the same value use
+      // vmslt{u}.vx vt, va, x;  vmandnot.mm vd, vd, vt
+      // This avoids needing to copy v0 to vd before starting the next sequence.
+      if (Mask == MaskedOff) {
+        SDValue Cmp = SDValue(
+            CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}),
+            0);
+        ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT,
+                                                 {Mask, Cmp, VL, SEW}));
+        return;
+      }
+
+      // Otherwise use
+      // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0
+      SDValue Cmp = SDValue(
+          CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT,
+                                 {MaskedOff, Src1, Src2, Mask, VL, SEW}),
+          0);
+      ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT,
+                                               {Cmp, Mask, VL, SEW}));
+      return;
+    }
+    }
+    break;
+  }
   case ISD::INTRINSIC_W_CHAIN: {
     unsigned IntNo = cast<ConstantSDNode>(Node->getOperand(1))->getZExtValue();
     switch (IntNo) {
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3952,10 +3952,12 @@
 defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>;
 defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>;
 
+defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>;
+defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>;
+
 // Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
 // avoids the user needing to know that there is no vmslt(u).vi instruction.
-// This is limited to vmslt(u).vx as there is no vmsge().vx intrinsic or
-// instruction.
+// Similar for vmsge(u).vx intrinsics using vmsgt(u).vi.
 foreach vti = AllIntegerVectors in {
   def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
                                        (vti.Scalar simm5_plus1:$rs2),
@@ -4017,6 +4019,67 @@
                                                   (vti.Mask V0),
                                                   GPR:$vl,
                                                   vti.SEW)>;
+
+  def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1),
+                                       (vti.Scalar simm5_plus1:$rs2),
+                                       VLOpFrag)),
+            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                               (DecImm simm5_plus1:$rs2),
+                                                               GPR:$vl,
+                                                               vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge),
+                                            (vti.Vector vti.RegClass:$rs1),
+                                            (vti.Scalar simm5_plus1:$rs2),
+                                            (vti.Mask V0),
+                                            VLOpFrag)),
+            (!cast<Instruction>("PseudoVMSGT_VI_"#vti.LMul.MX#"_MASK")
+                                                      VR:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      (DecImm simm5_plus1:$rs2),
+                                                      (vti.Mask V0),
+                                                      GPR:$vl,
+                                                      vti.SEW)>;
+
+  def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar simm5_plus1:$rs2),
+                                        VLOpFrag)),
+            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                                (DecImm simm5_plus1:$rs2),
+                                                                GPR:$vl,
+                                                                vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar simm5_plus1:$rs2),
+                                             (vti.Mask V0),
+                                             VLOpFrag)),
+            (!cast<Instruction>("PseudoVMSGTU_VI_"#vti.LMul.MX#"_MASK")
+                                                      VR:$merge,
+                                                      vti.RegClass:$rs1,
+                                                      (DecImm simm5_plus1:$rs2),
+                                                      (vti.Mask V0),
+                                                      GPR:$vl,
+                                                      vti.SEW)>;
+
+  // Special cases to avoid matching vmsgeu.vi 0 (always true) to
+  // vmsgtu.vi -1 (always false). Instead match to vmseq.vv vd, va, va.
+ def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1), + (vti.Scalar 0), VLOpFrag)), + (!cast("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1, + vti.RegClass:$rs1, + GPR:$vl, + vti.SEW)>; + def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge), + (vti.Vector vti.RegClass:$rs1), + (vti.Scalar 0), + (vti.Mask V0), + VLOpFrag)), + (!cast("PseudoVMSEQ_VV_"#vti.LMul.MX#"_MASK") + VR:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs1, + (vti.Mask V0), + GPR:$vl, + vti.SEW)>; } //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll @@ -0,0 +1,2832 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsge.nxv1i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8( + , + , + , + , + 
i32); + +define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, 
v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, 
i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vmsge.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; 
CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8.i8( + , + i8, + i32); + 
+define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8.i8( + , + , + 
i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; 
CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define 
@intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmv1r.v v26, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsle.vv v26, v25, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 -3, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi 
v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
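+; A minimal sketch of the call shape exercised below, hedged: the scalable-vector
+; types are written out here for the nxv1i8 variant (<vscale x 1 x i8> data,
+; <vscale x 1 x i1> mask), and %mask, %vec, %scalar and %vl are illustrative names
+; only. The same mask value is passed as both the maskedoff and the mask operand:
+;
+;   %r = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
+;            <vscale x 1 x i1> %mask,   ; maskedoff
+;            <vscale x 1 x i8> %vec,
+;            i8 %scalar,
+;            <vscale x 1 x i1> %mask,   ; mask, same value as maskedoff
+;            i32 %vl)
+;
+; The CHECK lines below expect vmslt.vx into a temporary followed by vmandnot.mm
+; into v0, with no vmv1r.v copy of v0 beforehand.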
+define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmv1r.v v26, v0 +; CHECK-NEXT: vsetvli a0, a2, 
e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v26, v25, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll @@ -0,0 +1,2775 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsge.nxv1i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare 
@llvm.riscv.vmsge.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; 
CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16( + , + , + i64); + +define 
@intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32( + %0, + %1, + i64 %2) + + 
ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: 
vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + 
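+; The vector-vector tests above check a swapped-operand vmsle.vv, using
+; a >= b <=> b <= a; for example, in the m1 cases the first source operand is in
+; v8 and the second in v9, and the expected compare is:
+;
+;   vmsle.vv v0, v9, v8
+;
+; The scalar (vx) tests that follow have no single-instruction vmsge.vx encoding
+; and instead check a vmslt.vx into a temporary plus a mask-logical instruction.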
+declare @llvm.riscv.vmsge.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} 
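+; For the unmasked vx form the tests above expect a two-instruction sequence,
+; shown here for the nxv8i8 (e8,m1) case as it appears in the CHECK lines:
+;
+;   vmslt.vx  v25, v8, a0     ; v25 = (v8 < a0)
+;   vmnand.mm v0, v25, v25    ; v0  = ~(v25 & v25) = (v8 >= a0)
+;
+; The masked vx tests below instead expect a masked vmslt.vx into a temporary
+; followed by a vmxor.mm against the mask register.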
+ +declare @llvm.riscv.vmsge.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli 
a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmsge_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32.i32( + , + , + 
i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, 
e64,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 -15, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 -13, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 -11, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 -9, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 -7, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 -5, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 -3, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 -1, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 0, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 2, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 4, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 6, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +; 
CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 8, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 10, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 12, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 13 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: 
jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
+ <vscale x 1 x i1> %0,
+ <vscale x 1 x i64> %1,
+ i64 15,
+ <vscale x 1 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, 15
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
+ <vscale x 2 x i64> %0,
+ i64 16,
+ i64 %1)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v10
+; CHECK-NEXT: vmsgt.vi v25, v8, -16, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
+ <vscale x 2 x i1> %0,
+ <vscale x 2 x i64> %1,
+ i64 -15,
+ <vscale x 2 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT: vmsgt.vi v0, v8, -15
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
+ <vscale x 4 x i64> %0,
+ i64 -14,
+ i64 %1)
+
+ ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
+; CHECK: # %bb.0: # %entry
+; CHECK-NEXT: vmv1r.v v25, v0
+; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT: vmv1r.v v0, v12
+; CHECK-NEXT: vmsgt.vi v25, v8, -14, v0.t
+; CHECK-NEXT: vmv1r.v v0, v25
+; CHECK-NEXT: jalr zero, 0(ra)
+entry:
+ %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
+ <vscale x 4 x i1> %0,
+ <vscale x 4 x i64> %1,
+ i64 -13,
+ <vscale x 4 x i1> %2,
+ i64 %3)
+
+ ret <vscale x 4 x i1> %a
+}
+
+; Test cases where the mask and maskedoff are the same value.
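+; Unlike the masked tests above, the checks below expect an unmasked vmslt.vx into a
+; temporary followed by a vmandnot.mm that reads and writes v0 directly, with no
+; vmv1r.v copies of the mask register.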
+define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll @@ -0,0 +1,2832 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgeu.nxv1i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; 
CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8( + , + , + , + , + 
i32); + +define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: 
vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + 
+ ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + 
i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, 
v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmv1r.v v26, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsleu.vv v26, v25, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: 
vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i32 
%1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 -3, + 
i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmseq.vv v25, v8, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmseq.vv v0, v8, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli 
a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 13, + 
%2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 13 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 14, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 16, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 -14, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i32 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
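A minimal, fully-typed sketch of the IR pattern this group of tests exercises, for reference while reading the test bodies: the maskedoff operand and the mask operand of the masked intrinsic are the same SSA value. This uses the nxv1i8 variant with the i32 VL operand of this RV32 test file; the function name is illustrative and the explicit <vscale x ...> types are reconstructed from the intrinsic's type suffix, so treat it as a standalone sketch rather than part of the patch itself.

declare <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32);

; The mask value %0 is also passed as the maskedoff (passthru) operand.
define <vscale x 1 x i1> @sketch_maskedoff_equals_mask(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i32 %3) nounwind {
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %0,
    i32 %3)
  ret <vscale x 1 x i1> %a
}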
+define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m1,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v25, (a0), zero +; CHECK-NEXT: vmv1r.v v26, v0 +; 
CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v26, v25, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v26 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m2,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v26, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: addi sp, sp, -16 +; CHECK-NEXT: sw a1, 12(sp) +; CHECK-NEXT: sw a0, 8(sp) +; CHECK-NEXT: vsetvli a0, a2, e64,m4,ta,mu +; CHECK-NEXT: addi a0, sp, 8 +; CHECK-NEXT: vlse64.v v28, (a0), zero +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: addi sp, sp, 16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll @@ -0,0 +1,2775 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgeu.nxv1i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8( + %0, + 
%1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; 
CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( + %0, 
+ %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, 
e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 
%4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; 
CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define 
@intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, 
a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) 
nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + 
+declare <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i64>,
+  i64,
+  <vscale x 2 x i1>,
+  i64);
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a2, a1, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
+; CHECK-NEXT:    vsetvli a0, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v25, v10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
+  <vscale x 4 x i64>,
+  i64,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmnand.mm v0, v25, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i64>,
+  i64,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a2, a1, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0, v0.t
+; CHECK-NEXT:    vsetvli a0, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vmxor.mm v0, v25, v12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -16
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i8.i8(
+    <vscale x 1 x i8> %0,
+    i8 -15,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -15, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 -14,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -14
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i8.i8(
+    <vscale x 2 x i8> %0,
+    i8 -13,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -13, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 -12,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -12
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i8.i8(
+    <vscale x 4 x i8> %0,
+    i8 -11,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -11, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 -10,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -10
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i8.i8(
+    <vscale x 8 x i8> %0,
+    i8 -9,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 -8,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i8.i8(
+    <vscale x 16 x i8> %0,
+    i8 -7,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -7, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 -6,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgeu_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -6
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.nxv32i8.i8(
+    <vscale x 32 x i8> %0,
+    i8 -5,
+    i64 %1)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgeu_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e8,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -5, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 -4,
+    <vscale x 32 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -4
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i16.i16(
+    <vscale x 1 x i16> %0,
+    i16 -3,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -3, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 -2,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -2
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i16.i16(
+    <vscale x 2 x i16> %0,
+    i16 -1,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmseq.vv v25, v8, v8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 0,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
+; CHECK-NEXT:    vmseq.vv v0, v8, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i16.i16(
+    <vscale x 4 x i16> %0,
+    i16 0,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e16,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 0, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 1,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 1
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i16.i16(
+    <vscale x 8 x i16> %0,
+    i16 2,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e16,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 2, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 3,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 3
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.nxv16i16.i16(
+    <vscale x 16 x i16> %0,
+    i16 4,
+    i64 %1)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e16,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 4, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 5,
+    <vscale x 16 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 5
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i32.i32(
+    <vscale x 1 x i32> %0,
+    i32 6,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 6, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 7,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 7
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i32.i32(
+    <vscale x 2 x i32> %0,
+    i32 8,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e32,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 8, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 9,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 9
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i32.i32(
+    <vscale x 4 x i32> %0,
+    i32 10,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e32,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 10, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 11,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 11
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.nxv8i32.i32(
+    <vscale x 8 x i32> %0,
+    i32 12,
+    i64 %1)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e32,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 13,
+    <vscale x 8 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 13
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.nxv1i64.i64(
+    <vscale x 1 x i64> %0,
+    i64 14,
+    i64 %1)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e64,m1,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v9
+; CHECK-NEXT:    vmsgtu.vi v25, v8, 14, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %1,
+    i64 15,
+    <vscale x 1 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, 15
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.nxv2i64.i64(
+    <vscale x 2 x i64> %0,
+    i64 16,
+    i64 %1)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e64,m2,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v10
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -16, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %1,
+    i64 -15,
+    <vscale x 2 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmsgtu.vi v0, v8, -15
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.nxv4i64.i64(
+    <vscale x 4 x i64> %0,
+    i64 -14,
+    i64 %1)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmv1r.v v0, v12
+; CHECK-NEXT:    vmsgtu.vi v25, v8, -14, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %1,
+    i64 -13,
+    <vscale x 4 x i1> %2,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+; Test cases where the mask and maskedoff are the same value.
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i8.i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %1,
+    i8 %2,
+    <vscale x 1 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i8.i8(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i8> %1,
+    i8 %2,
+    <vscale x 2 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i8.i8(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i8> %1,
+    i8 %2,
+    <vscale x 4 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i8.i8(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i8> %1,
+    i8 %2,
+    <vscale x 8 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i8.i8(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i8> %1,
+    i8 %2,
+    <vscale x 16 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 32 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e8,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 32 x i1> @llvm.riscv.vmsgeu.mask.nxv32i8.i8(
+    <vscale x 32 x i1> %0,
+    <vscale x 32 x i8> %1,
+    i8 %2,
+    <vscale x 32 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 32 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i16.i16(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i16> %1,
+    i16 %2,
+    <vscale x 1 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i16.i16(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i16> %1,
+    i16 %2,
+    <vscale x 2 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i16.i16(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i16> %1,
+    i16 %2,
+    <vscale x 4 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i16.i16(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i16> %1,
+    i16 %2,
+    <vscale x 8 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 16 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e16,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 16 x i1> @llvm.riscv.vmsgeu.mask.nxv16i16.i16(
+    <vscale x 16 x i1> %0,
+    <vscale x 16 x i16> %1,
+    i16 %2,
+    <vscale x 16 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 16 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,mf2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i32.i32(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i32> %1,
+    i32 %2,
+    <vscale x 1 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i32.i32(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i32> %1,
+    i32 %2,
+    <vscale x 2 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i32.i32(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i32> %1,
+    i32 %2,
+    <vscale x 4 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}
+
+define <vscale x 8 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e32,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 8 x i1> @llvm.riscv.vmsgeu.mask.nxv8i32.i32(
+    <vscale x 8 x i1> %0,
+    <vscale x 8 x i32> %1,
+    i32 %2,
+    <vscale x 8 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 8 x i1> %a
+}
+
+define <vscale x 1 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m1,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgeu.mask.nxv1i64.i64(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i64> %1,
+    i64 %2,
+    <vscale x 1 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 1 x i1> %a
+}
+
+define <vscale x 2 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m2,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgeu.mask.nxv2i64.i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %1,
+    i64 %2,
+    <vscale x 2 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 2 x i1> %a
+}
+
+define <vscale x 4 x i1> @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
+; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a1, a1, e64,m4,ta,mu
+; CHECK-NEXT:    vmsltu.vx v25, v8, a0
+; CHECK-NEXT:    vmandnot.mm v0, v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgeu.mask.nxv4i64.i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2,
+    <vscale x 4 x i1> %0,
+    i64 %3)
+
+  ret <vscale x 4 x i1> %a
+}