diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td --- a/llvm/include/llvm/IR/IntrinsicsRISCV.td +++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td @@ -989,6 +989,8 @@ defm vmsle : RISCVCompare; defm vmsgtu : RISCVCompare; defm vmsgt : RISCVCompare; + defm vmsgeu : RISCVCompare; + defm vmsge : RISCVCompare; defm vminu : RISCVBinaryAAX; defm vmin : RISCVBinaryAAX; diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -431,6 +431,188 @@ break; } + case ISD::INTRINSIC_WO_CHAIN: { + unsigned IntNo = Node->getConstantOperandVal(0); + switch (IntNo) { + // By default we do not custom select any intrinsic. + default: + break; + case Intrinsic::riscv_vmsgeu: + case Intrinsic::riscv_vmsge: { + SDValue Src1 = Node->getOperand(1); + SDValue Src2 = Node->getOperand(2); + // Only custom select scalar second operand. + if (Src2.getValueType() != XLenVT) + break; + // Small constants are handled with patterns. + if (auto *C = dyn_cast(Src2)) { + int64_t CVal = C->getSExtValue(); + if (CVal >= -15 && CVal <= 16) + break; + } + bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu; + MVT Src1VT = Src1.getSimpleValueType(); + unsigned VMSLTOpcode, VMNANDOpcode; + switch (RISCVTargetLowering::getLMUL(Src1VT)) { + default: + llvm_unreachable("Unexpected LMUL!"); + case RISCVVLMUL::LMUL_F8: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF8; + break; + case RISCVVLMUL::LMUL_F4: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF4; + break; + case RISCVVLMUL::LMUL_F2: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_MF2; + break; + case RISCVVLMUL::LMUL_1: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_M1; + break; + case RISCVVLMUL::LMUL_2: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_M2; + break; + case RISCVVLMUL::LMUL_4: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_M4; + break; + case RISCVVLMUL::LMUL_8: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8; + VMNANDOpcode = RISCV::PseudoVMNAND_MM_M8; + break; + } + SDValue SEW = + CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT); + SDValue VL; + selectVLOp(Node->getOperand(3), VL); + + // Expand to + // vmslt{u}.vx vd, va, x; vmnand.mm vd, vd, vd + SDValue Cmp = SDValue( + CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}), + 0); + ReplaceNode(Node, CurDAG->getMachineNode(VMNANDOpcode, DL, VT, + {Cmp, Cmp, VL, SEW})); + return; + } + case Intrinsic::riscv_vmsgeu_mask: + case Intrinsic::riscv_vmsge_mask: { + SDValue Src1 = Node->getOperand(2); + SDValue Src2 = Node->getOperand(3); + // Only custom select scalar second operand. + if (Src2.getValueType() != XLenVT) + break; + // Small constants are handled with patterns. 
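+ // A constant in [-15, 16] is matched by the TableGen patterns to
+ // vmsgt{u}.vi with the immediate decremented by one, since simm5 only
+ // covers [-16, 15]; vmsgeu.vx with 0 is matched to vmseq.vv, which is
+ // always true.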
+ if (auto *C = dyn_cast(Src2)) { + int64_t CVal = C->getSExtValue(); + if (CVal >= -15 && CVal <= 16) + break; + } + bool IsUnsigned = IntNo == Intrinsic::riscv_vmsgeu_mask; + MVT Src1VT = Src1.getSimpleValueType(); + unsigned VMSLTOpcode, VMSLTMaskOpcode, VMXOROpcode, VMANDNOTOpcode; + switch (RISCVTargetLowering::getLMUL(Src1VT)) { + default: + llvm_unreachable("Unexpected LMUL!"); + case RISCVVLMUL::LMUL_F8: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8 : RISCV::PseudoVMSLT_VX_MF8; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF8_MASK + : RISCV::PseudoVMSLT_VX_MF8_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_MF8; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF8; + break; + case RISCVVLMUL::LMUL_F4: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4 : RISCV::PseudoVMSLT_VX_MF4; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF4_MASK + : RISCV::PseudoVMSLT_VX_MF4_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_MF4; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF4; + break; + case RISCVVLMUL::LMUL_F2: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2 : RISCV::PseudoVMSLT_VX_MF2; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_MF2_MASK + : RISCV::PseudoVMSLT_VX_MF2_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_MF2; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_MF2; + break; + case RISCVVLMUL::LMUL_1: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1 : RISCV::PseudoVMSLT_VX_M1; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M1_MASK + : RISCV::PseudoVMSLT_VX_M1_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_M1; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M1; + break; + case RISCVVLMUL::LMUL_2: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2 : RISCV::PseudoVMSLT_VX_M2; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M2_MASK + : RISCV::PseudoVMSLT_VX_M2_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_M2; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M2; + break; + case RISCVVLMUL::LMUL_4: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4 : RISCV::PseudoVMSLT_VX_M4; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M4_MASK + : RISCV::PseudoVMSLT_VX_M4_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_M4; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M4; + break; + case RISCVVLMUL::LMUL_8: + VMSLTOpcode = + IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8 : RISCV::PseudoVMSLT_VX_M8; + VMSLTMaskOpcode = IsUnsigned ? RISCV::PseudoVMSLTU_VX_M8_MASK + : RISCV::PseudoVMSLT_VX_M8_MASK; + VMXOROpcode = RISCV::PseudoVMXOR_MM_M8; + VMANDNOTOpcode = RISCV::PseudoVMANDNOT_MM_M8; + break; + } + SDValue SEW = + CurDAG->getTargetConstant(Src1VT.getScalarSizeInBits(), DL, XLenVT); + SDValue VL; + selectVLOp(Node->getOperand(5), VL); + SDValue MaskedOff = Node->getOperand(1); + SDValue Mask = Node->getOperand(4); + // If the MaskedOff value and the Mask are the same value use + // vmslt{u}.vx vt, va, x; vmandnot.mm vd, vd, vt + // This avoids needing to copy v0 to vd before starting the next sequence. 
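+ // Both expansions use vmsge{u}(a, b) == !vmslt{u}(a, b). When MaskedOff
+ // equals the Mask, inactive elements must end up 0, so the result is
+ // Mask & ~vmslt, i.e. a single vmandnot. Otherwise the masked vmslt
+ // writes the compare into the active lanes and keeps MaskedOff in the
+ // inactive ones, and the final vmxor with the mask flips only the
+ // active lanes.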
+ if (Mask == MaskedOff) { + SDValue Cmp = SDValue( + CurDAG->getMachineNode(VMSLTOpcode, DL, VT, {Src1, Src2, VL, SEW}), + 0); + ReplaceNode(Node, CurDAG->getMachineNode(VMANDNOTOpcode, DL, VT, + {Mask, Cmp, VL, SEW})); + return; + } + + // Otherwise use + // vmslt{u}.vx vd, va, x, v0.t; vmxor.mm vd, vd, v0 + SDValue Cmp = SDValue( + CurDAG->getMachineNode(VMSLTMaskOpcode, DL, VT, + {MaskedOff, Src1, Src2, Mask, VL, SEW}), + 0); + ReplaceNode(Node, CurDAG->getMachineNode(VMXOROpcode, DL, VT, + {Cmp, Mask, VL, SEW})); + return; + } + } + break; + } case ISD::INTRINSIC_W_CHAIN: { unsigned IntNo = cast(Node->getOperand(1))->getZExtValue(); switch (IntNo) { diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td @@ -2220,6 +2220,24 @@ (op2_type op2_kind:$rs2), GPR:$vl, sew)>; +// Same as above but source operands are swapped. +class VPatBinaryNoMaskSwapped : + Pat<(result_type (!cast(intrinsic_name) + (op2_type op2_kind:$rs2), + (op1_type op1_reg_class:$rs1), + VLOpFrag)), + (!cast(inst) + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + GPR:$vl, sew)>; + class VPatBinaryMask; +// Same as above but source operands are swapped. +class VPatBinaryMaskSwapped : + Pat<(result_type (!cast(intrinsic_name#"_mask") + (result_type result_reg_class:$merge), + (op2_type op2_kind:$rs2), + (op1_type op1_reg_class:$rs1), + (mask_type V0), + VLOpFrag)), + (!cast(inst#"_MASK") + (result_type result_reg_class:$merge), + (op1_type op1_reg_class:$rs1), + (op2_type op2_kind:$rs2), + (mask_type V0), GPR:$vl, sew)>; + class VPatTernaryNoMask; } +multiclass VPatBinarySwapped +{ + def : VPatBinaryNoMaskSwapped; + def : VPatBinaryMaskSwapped; +} + multiclass VPatBinaryCarryIn; } +multiclass VPatBinarySwappedM_VV vtilist> { + foreach vti = vtilist in + defm : VPatBinarySwapped; +} + multiclass VPatBinaryM_VX vtilist> { foreach vti = vtilist in { @@ -3881,50 +3949,11 @@ defm : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>; // Match vmsgt with 2 vector operands to vmslt with the operands swapped. -// Occurs when legalizing vmsgt(u).vx intrinsics for i64 on RV32 since we need -// to use a more complex splat sequence. Add the pattern for all VTs for -// consistency. 
-foreach vti = AllIntegerVectors in { - def : Pat<(vti.Mask (int_riscv_vmsgt (vti.Vector vti.RegClass:$rs2), - (vti.Vector vti.RegClass:$rs1), - VLOpFrag)), - (!cast("PseudoVMSLT_VV_"#vti.LMul.MX) vti.RegClass:$rs1, - vti.RegClass:$rs2, - GPR:$vl, - vti.SEW)>; - def : Pat<(vti.Mask (int_riscv_vmsgt_mask (vti.Mask VR:$merge), - (vti.Vector vti.RegClass:$rs2), - (vti.Vector vti.RegClass:$rs1), - (vti.Mask V0), - VLOpFrag)), - (!cast("PseudoVMSLT_VV_"#vti.LMul.MX#"_MASK") - VR:$merge, - vti.RegClass:$rs1, - vti.RegClass:$rs2, - (vti.Mask V0), - GPR:$vl, - vti.SEW)>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmsgtu", "PseudoVMSLTU", AllIntegerVectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmsgt", "PseudoVMSLT", AllIntegerVectors>; - def : Pat<(vti.Mask (int_riscv_vmsgtu (vti.Vector vti.RegClass:$rs2), - (vti.Vector vti.RegClass:$rs1), - VLOpFrag)), - (!cast("PseudoVMSLTU_VV_"#vti.LMul.MX) vti.RegClass:$rs1, - vti.RegClass:$rs2, - GPR:$vl, - vti.SEW)>; - def : Pat<(vti.Mask (int_riscv_vmsgtu_mask (vti.Mask VR:$merge), - (vti.Vector vti.RegClass:$rs2), - (vti.Vector vti.RegClass:$rs1), - (vti.Mask V0), - VLOpFrag)), - (!cast("PseudoVMSLTU_VV_"#vti.LMul.MX#"_MASK") - VR:$merge, - vti.RegClass:$rs1, - vti.RegClass:$rs2, - (vti.Mask V0), - GPR:$vl, - vti.SEW)>; -} +defm : VPatBinarySwappedM_VV<"int_riscv_vmsgeu", "PseudoVMSLEU", AllIntegerVectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmsge", "PseudoVMSLE", AllIntegerVectors>; // Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This // avoids the user needing to know that there is no vmslt(u).vi instruction. @@ -3991,6 +4020,67 @@ (vti.Mask V0), GPR:$vl, vti.SEW)>; + + def : Pat<(vti.Mask (int_riscv_vmsge (vti.Vector vti.RegClass:$rs1), + (vti.Scalar simm5_plus1:$rs2), + VLOpFrag)), + (!cast("PseudoVMSGT_VI_"#vti.LMul.MX) vti.RegClass:$rs1, + (DecImm simm5_plus1:$rs2), + GPR:$vl, + vti.SEW)>; + def : Pat<(vti.Mask (int_riscv_vmsge_mask (vti.Mask VR:$merge), + (vti.Vector vti.RegClass:$rs1), + (vti.Scalar simm5_plus1:$rs2), + (vti.Mask V0), + VLOpFrag)), + (!cast("PseudoVMSGT_VI_"#vti.LMul.MX#"_MASK") + VR:$merge, + vti.RegClass:$rs1, + (DecImm simm5_plus1:$rs2), + (vti.Mask V0), + GPR:$vl, + vti.SEW)>; + + def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1), + (vti.Scalar simm5_plus1:$rs2), + VLOpFrag)), + (!cast("PseudoVMSGTU_VI_"#vti.LMul.MX) vti.RegClass:$rs1, + (DecImm simm5_plus1:$rs2), + GPR:$vl, + vti.SEW)>; + def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge), + (vti.Vector vti.RegClass:$rs1), + (vti.Scalar simm5_plus1:$rs2), + (vti.Mask V0), + VLOpFrag)), + (!cast("PseudoVMSGTU_VI_"#vti.LMul.MX#"_MASK") + VR:$merge, + vti.RegClass:$rs1, + (DecImm simm5_plus1:$rs2), + (vti.Mask V0), + GPR:$vl, + vti.SEW)>; + + // Special cases to avoid matching vmsgeu.vi 0 (always true) to + // vmsgtu.vi -1 (always false). Instead match to vmsne.vv. 
+ def : Pat<(vti.Mask (int_riscv_vmsgeu (vti.Vector vti.RegClass:$rs1), + (vti.Scalar 0), VLOpFrag)), + (!cast("PseudoVMSEQ_VV_"#vti.LMul.MX) vti.RegClass:$rs1, + vti.RegClass:$rs1, + GPR:$vl, + vti.SEW)>; + def : Pat<(vti.Mask (int_riscv_vmsgeu_mask (vti.Mask VR:$merge), + (vti.Vector vti.RegClass:$rs1), + (vti.Scalar 0), + (vti.Mask V0), + VLOpFrag)), + (!cast("PseudoVMSEQ_VV_"#vti.LMul.MX#"_MASK") + VR:$merge, + vti.RegClass:$rs1, + vti.RegClass:$rs1, + (vti.Mask V0), + GPR:$vl, + vti.SEW)>; } //===----------------------------------------------------------------------===// @@ -4179,6 +4269,8 @@ defm : VPatBinaryM_VV_VX<"int_riscv_vmfne", "PseudoVMFNE", AllFloatVectors>; defm : VPatBinaryM_VX<"int_riscv_vmfgt", "PseudoVMFGT", AllFloatVectors>; defm : VPatBinaryM_VX<"int_riscv_vmfge", "PseudoVMFGE", AllFloatVectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmfgt", "PseudoVMFLT", AllFloatVectors>; +defm : VPatBinarySwappedM_VV<"int_riscv_vmfge", "PseudoVMFLE", AllFloatVectors>; //===----------------------------------------------------------------------===// // 14.14. Vector Floating-Point Classify Instruction diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv32.ll @@ -1,6 +1,630 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmfge.nxv1f16( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f16( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = 
call @llvm.riscv.vmfge.nxv2f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f16( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8f16( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv8f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv8f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv16f16( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv16f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv16f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1f32( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f32( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv2f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f32( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8f32( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, 
%1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv8f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv8f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1f64( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv1f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f64( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f64( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f64( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv2f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f64( + , + , + i32); + +define @intrinsic_vmfge_vv_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f64( + , + , + , + , + i32); + +define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, 
v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + declare @llvm.riscv.vmfge.nxv1f16.f16( , half, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfge-rv64.ll @@ -1,6 +1,630 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmfge.nxv1f16( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f16( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv2f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f16( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; 
CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8f16( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv8f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv8f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv16f16( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv16f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv16f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1f32( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f32( + %0, + %2, + %3, + %mask, + 
i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f32( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv2f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f32( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv8f32( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv8f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv8f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv1f64( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vmfge.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmfle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmfle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv1f64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv1f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv2f64( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmfle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmfle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv2f64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv2f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfge.nxv4f64( + , + , + i64); + +define @intrinsic_vmfge_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfge_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfge.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfge.mask.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfge_mask_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmfle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmfle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfge.nxv4f64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfge.mask.nxv4f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + declare @llvm.riscv.vmfge.nxv1f16.f16( , half, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv32.ll @@ -1,6 +1,630 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmfgt.nxv1f16( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv1f16_nxv1f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f16_nxv1f16: +; 
CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f16( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f16( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv2f16_nxv2f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f16( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f16( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv4f16_nxv4f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f16( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8f16( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv8f16_nxv8f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv8f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8f16( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, 
%1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv8f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16f16( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv16f16_nxv16f16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv16f16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16f16( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv16f16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv16f16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1f32( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv1f32_nxv1f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f32( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f32( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv2f32_nxv2f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f32( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: 
vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f32( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv4f32_nxv4f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f32( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8f32( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv8f32_nxv8f32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv8f32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8f32( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv8f32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8f32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1f64( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv1f64_nxv1f64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f64( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f64( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv2f64_nxv2f64( %0, %1, i32 %2) nounwind { 
+; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f64( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f64( + , + , + i32); + +define @intrinsic_vmfgt_vv_nxv4f64_nxv4f64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f64( + , + , + , + , + i32); + +define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + declare @llvm.riscv.vmfgt.nxv1f16.f16( , half, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmfgt-rv64.ll @@ -1,6 +1,630 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmfgt.nxv1f16( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv1f16_nxv1f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f16( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f16_nxv1f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f16( + %0, + %2, + %3, + %mask, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f16( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv2f16_nxv2f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f16( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f16_nxv2f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f16( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv4f16_nxv4f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f16( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f16_nxv4f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8f16( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv8f16_nxv8f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv8f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8f16( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f16_nxv8f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv8f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv16f16( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv16f16_nxv16f16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + 
%a = call @llvm.riscv.vmfgt.nxv16f16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv16f16( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv16f16_nxv16f16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv16f16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv16f16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1f32( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv1f32_nxv1f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f32( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f32_nxv1f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f32( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv2f32_nxv2f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f32( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f32_nxv2f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv2f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f32( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv4f32_nxv4f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f32( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f32_nxv4f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v 
v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv8f32( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv8f32_nxv8f32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv8f32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv8f32( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv8f32_nxv8f32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv8f32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv8f32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv1f64( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv1f64_nxv1f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv1f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv1f64( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv1f64_nxv1f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmflt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmflt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv1f64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv1f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv2f64( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv2f64_nxv2f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv2f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv2f64( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv2f64_nxv2f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmflt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmflt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv2f64( + %1, + %2, + i64 %4) + %a = call 
@llvm.riscv.vmfgt.mask.nxv2f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmfgt.nxv4f64( + , + , + i64); + +define @intrinsic_vmfgt_vv_nxv4f64_nxv4f64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmfgt.nxv4f64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmfgt.mask.nxv4f64( + , + , + , + , + i64); + +define @intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmfgt_mask_vv_nxv4f64_nxv4f64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmflt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmflt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmfgt.nxv4f64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmfgt.mask.nxv4f64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + declare @llvm.riscv.vmfgt.nxv1f16.f16( , half, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv32.ll @@ -0,0 +1,2841 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsge.nxv1i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %mask = call @llvm.riscv.vmsge.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmsge_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64( + , + , + i32); + +define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8.i8( + 
, + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 
0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, 
v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsge_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vmsle.vv v0, v25, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v27, a0 +; CHECK-NEXT: vsll.vx v27, v27, a1 +; CHECK-NEXT: vsrl.vx v27, v27, a1 +; CHECK-NEXT: vor.vv v26, v27, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vmsle.vv v0, v26, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64.i64( + , + i64, + i32); + +define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsll.vx v12, v12, a1 +; CHECK-NEXT: vsrl.vx v12, v12, a1 +; CHECK-NEXT: vor.vv v28, v12, v28 +; CHECK-NEXT: vmsle.vv v0, v28, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vsll.vx v16, v16, a1 +; CHECK-NEXT: vsrl.vx v16, v16, a1 +; CHECK-NEXT: vor.vv v28, v16, v28 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t 
+; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: +; CHECK: # %bb.0: # 
%entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 -3, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, 
%2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t +; CHECK-NEXT: 
vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 9, + %2, + i32 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
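The tests that follow pass the same mask value as both the maskedoff operand and the mask operand of the masked intrinsic; per the CHECK lines below, the result is built directly as vmslt.vx v25, v8, a0 followed by vmandnot.mm v0, v0, v25, without the vmv1r.v copies of v0 that the other masked tests show. A minimal standalone sketch of the call shape being exercised, with the scalable types written out from the nxv1i8 suffix; the function and value names here are illustrative, not taken from the test file:

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
  <vscale x 1 x i1>, <vscale x 1 x i8>, i8, <vscale x 1 x i1>, i32)

define <vscale x 1 x i1> @sketch_vmsge_mask_same_maskedoff(<vscale x 1 x i1> %mask, <vscale x 1 x i8> %va, i8 %b, i32 %vl) nounwind {
entry:
  ; %mask is used both as the maskedoff (first) and the mask (fourth) operand,
  ; so the expected selection is: vmslt.vx v25, v8, a0 ; vmandnot.mm v0, v0, v25
  %r = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
      <vscale x 1 x i1> %mask,
      <vscale x 1 x i8> %va,
      i8 %b,
      <vscale x 1 x i1> %mask,
      i32 %vl)
  ret <vscale x 1 x i1> %r
}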
+define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v27, a0 +; CHECK-NEXT: vsll.vx v27, v27, a1 +; CHECK-NEXT: vsrl.vx v27, v27, 
a1 +; CHECK-NEXT: vor.vv v26, v27, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsll.vx v12, v12, a1 +; CHECK-NEXT: vsrl.vx v12, v12, a1 +; CHECK-NEXT: vor.vv v28, v12, v28 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsge-rv64.ll @@ -0,0 +1,2775 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsge.nxv1i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; 
CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i16( + 
%0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, 
v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32: +; CHECK: # 
%bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsle.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsle.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsle.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsle.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64( + , + , + i64); + +define @intrinsic_vmsge_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsle.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsle.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsge.nxv4i64( + %1, + %2, + 
i64 %4) + %a = call @llvm.riscv.vmsge.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: 
jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmsge_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, 
e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmsge_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv16i16.i16( + , + i16, + 
i64); + +define @intrinsic_vmsge_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + 
%0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmsge_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m2,tu,mu +; 
CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsge.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vmsge_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsge.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsge_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmslt.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i8.i8( + %0, + i8 -15, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i8.i8( + %0, + i8 -13, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i8.i8( + %0, + i8 -11, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; 
CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i8.i8( + %0, + i8 -9, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i8.i8( + %0, + i8 -7, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv32i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv32i8.i8( + %0, + i8 -5, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i16.i16( + %0, + i16 -3, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i16_i16( %0, i64 %1) 
nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i16.i16( + %0, + i16 -1, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, -1, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i16.i16( + %0, + i16 0, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i16.i16( + %0, + i16 2, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv16i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv16i16.i16( + %0, + i16 4, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i32.i32( + %0, + i32 6, + 
i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i32.i32( + %0, + i32 8, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i32.i32( + %0, + i32 10, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv8i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv8i32.i32( + %0, + i32 12, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv1i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 13 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv1i64.i64( + %0, + i64 14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; 
CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgt.vi v25, v8, 14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv2i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, 15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv2i64.i64( + %0, + i64 16, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgt.vi v25, v8, -16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_vi_nxv4i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgt.vi v0, v8, -15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.nxv4i64.i64( + %0, + i64 -14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsge_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgt.vi v25, v8, -14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i64 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
+define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} + +define 
@intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsge.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv32.ll @@ -0,0 +1,2841 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgeu.nxv1i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; 
CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8( + , + , + , + , + 
i32); + +define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: 
vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + 
+ ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64( + , + , + i32); + +define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { 
+; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + 
i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8.i8( + , + i8, + i32); + +define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + , + , + i8, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, 
v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16.i16( + , + i16, + i32); + +define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + , + , + i16, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) 
+entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32.i32( + , + i32, + i32); + +define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + , + , + i32, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v25, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v25, v25, a1 +; CHECK-NEXT: vmv.v.x v26, a0 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vsrl.vx v26, v26, a1 +; CHECK-NEXT: vor.vv v25, v26, v25 +; CHECK-NEXT: vmsleu.vv v0, v25, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v27, a0 +; CHECK-NEXT: vsll.vx v27, v27, a1 +; CHECK-NEXT: vsrl.vx v27, v27, a1 +; CHECK-NEXT: vor.vv v26, v27, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vmsleu.vv v0, v26, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64.i64( + , + i64, + i32); + +define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a2, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsll.vx v12, v12, a1 +; CHECK-NEXT: vsrl.vx v12, v12, a1 +; CHECK-NEXT: vor.vv v28, v12, v28 +; CHECK-NEXT: vmsleu.vv v0, v28, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + , + , + i64, + , + i32); + +define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v16, a0 +; CHECK-NEXT: vsll.vx v16, v16, a1 +; CHECK-NEXT: vsrl.vx v16, v16, a1 +; CHECK-NEXT: vor.vv v28, v16, v28 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i32 %4) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli 
a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 -3, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmseq.vv v25, v8, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmseq.vv v0, v8, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 3 +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: 
+; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 13 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 14, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 16, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i32 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 -14, + i32 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i32 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
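+;
+; The tests below exercise the ISel special case where the maskedoff operand
+; and the mask operand are the same value. The compare is then expected to
+; lower to an unmasked vmslt{u}.vx followed by vmandnot.mm straight into v0,
+; avoiding the extra copy of v0 into the destination register. A minimal
+; sketch of the expected shape (register numbers are illustrative assumptions;
+; the autogenerated CHECK lines in each test are authoritative):
+;   vmsltu.vx   v25, v8, a0     ; v25[i] = (v8[i] <u a0), i.e. NOT (v8[i] >=u a0)
+;   vmandnot.mm v0, v0, v25     ; v0[i]  = mask[i] & ~v25[i] = mask[i] & (v8[i] >=u a0)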
+define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m1,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v27, a0 +; CHECK-NEXT: vsll.vx v27, v27, a1 +; 
CHECK-NEXT: vsrl.vx v27, v27, a1 +; CHECK-NEXT: vor.vv v26, v27, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m2,ta,mu +; CHECK-NEXT: vmv.v.x v26, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v26, v26, a1 +; CHECK-NEXT: vmv.v.x v28, a0 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vsrl.vx v28, v28, a1 +; CHECK-NEXT: vor.vv v26, v28, v26 +; CHECK-NEXT: vsetvli a0, a2, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v26, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i32 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a3, a2, e64,m4,ta,mu +; CHECK-NEXT: vmv.v.x v28, a1 +; CHECK-NEXT: addi a1, zero, 32 +; CHECK-NEXT: vsll.vx v28, v28, a1 +; CHECK-NEXT: vmv.v.x v12, a0 +; CHECK-NEXT: vsll.vx v12, v12, a1 +; CHECK-NEXT: vsrl.vx v12, v12, a1 +; CHECK-NEXT: vor.vv v28, v12, v28 +; CHECK-NEXT: vsetvli a0, a2, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v28, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgeu-rv64.ll @@ -0,0 +1,2775 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgeu.nxv1i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, 
a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgeu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32( + , + , + i64); + +define 
@intrinsic_vmsgeu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32( + 
%0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64( + , + , + i64); + +define @intrinsic_vmsgeu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 
+; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsleu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsleu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgeu.nxv4i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + 
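+;
+; For the masked vmsge{u}.vx form with a distinct maskedoff value, the CHECK
+; lines above show the general expansion: the old destination is saved, the
+; compare runs as a masked, tail-undisturbed vmslt{u}.vx into that copy, and a
+; final vmxor.mm with the mask flips the active lanes from "less than" to
+; "greater than or equal" while the inactive lanes keep the maskedoff value.
+; Sketch of the shape (register numbers are illustrative assumptions only):
+;   vmv1r.v   v25, v0           ; v25 = maskedoff
+;   vmv1r.v   v0, v9            ; v0  = mask
+;   vmsltu.vx v25, v8, a0, v0.t ; active lanes: v25[i] = (v8[i] <u a0)
+;   vmxor.mm  v0, v25, v9       ; active lanes flipped to >=u; inactive lanes keep maskedoff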
+declare @llvm.riscv.vmsgeu.nxv8i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv16i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv16i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv32i8.i8( + , + i8, + i64); + +define @intrinsic_vmsgeu_vx_nxv32i8_i8( %0, i8 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + , + , + i8, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv32i8_i8( %0, %1, i8 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e8,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call 
@llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: 
vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv16i16.i16( + , + i16, + i64); + +define @intrinsic_vmsgeu_vx_nxv16i16_i16( %0, i16 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + , + , + i16, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv16i16_i16( %0, %1, i16 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e16,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + 
%3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv8i32.i32( + , + i32, + i64); + +define @intrinsic_vmsgeu_vx_nxv8i32_i32( %0, i32 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + , + , + i32, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv8i32_i32( %0, %1, i32 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e32,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv1i64.i64( + , + i64, + i64); + +define @intrinsic_vmsgeu_vx_nxv1i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv1i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m1,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv2i64.i64( + , + i64, + i64); + +define @intrinsic_vmsgeu_vx_nxv2i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: 
vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv2i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m2,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgeu.nxv4i64.i64( + , + i64, + i64); + +define @intrinsic_vmsgeu_vx_nxv4i64_i64( %0, i64 %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmnand.mm v0, v25, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + , + , + i64, + , + i64); + +define @intrinsic_vmsgeu_mask_vx_nxv4i64_i64( %0, %1, i64 %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a2, a1, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsltu.vx v25, v8, a0, v0.t +; CHECK-NEXT: vsetvli a0, a1, e64,m4,ta,mu +; CHECK-NEXT: vmxor.mm v0, v25, v12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %3, + i64 %4) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -16 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i8.i8( + %0, + i8 -15, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -15, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -14 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i8.i8( + %0, + i8 -13, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -13, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i8_i8( %0, i64 %1) nounwind { +; 
CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -12 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i8.i8( + %0, + i8 -11, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -11, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -10 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i8.i8( + %0, + i8 -9, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i8.i8( + %0, + i8 -7, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -7, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv32i8_i8( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -6 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv32i8.i8( + %0, + i8 -5, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -5, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -4 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i16.i16( + %0, + i16 -3, + i64 %1) + + ret %a 
+} + +define @intrinsic_vmsgeu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, -3, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -2 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i16.i16( + %0, + i16 -1, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmseq.vv v25, v8, v8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmseq.vv v0, v8, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i16.i16( + %0, + i16 0, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 0, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 1 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i16.i16( + %0, + i16 2, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 2, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv16i16_i16( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 3 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv16i16.i16( + %0, + i16 4, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, 
e16,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 4, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 5 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i32.i32( + %0, + i32 6, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 6, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 7 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i32.i32( + %0, + i32 8, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 8, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 9 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i32.i32( + %0, + i32 10, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, 10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv8i32_i32( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 11 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv8i32.i32( + %0, + i32 12, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, 12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i64 
%3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv1i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 13 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv1i64.i64( + %0, + i64 14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmv1r.v v0, v9 +; CHECK-NEXT: vmsgtu.vi v25, v8, 14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv2i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, 15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv2i64.i64( + %0, + i64 16, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmv1r.v v0, v10 +; CHECK-NEXT: vmsgtu.vi v25, v8, -16, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_vi_nxv4i64_i64( %0, i64 %1) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsgtu.vi v0, v8, -15 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.nxv4i64.i64( + %0, + i64 -14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsgeu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_mask_vi_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmv1r.v v0, v12 +; CHECK-NEXT: vmsgtu.vi v25, v8, -14, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i64 %3) + + ret %a +} + +; Test cases where the mask and maskedoff are the same value. 
+define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8( %0, %1, i8 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv32i8_i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv32i8.i8( + %0, + %1, + i8 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define 
@intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16( %0, %1, i16 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv16i16_i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv16i16.i16( + %0, + %1, + i16 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32( %0, %1, i32 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv8i32_i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv8i32.i32( + %0, + %1, + i32 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv1i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv1i64.i64( + %0, + %1, + i64 %2, + %0, + i64 
%3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv2i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv2i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} + +define @intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64( %0, %1, i64 %2, i64 %3) nounwind { +; CHECK-LABEL: intrinsic_vmsgeu_maskedoff_mask_vx_nxv4i64_i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a1, a1, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vx v25, v8, a0 +; CHECK-NEXT: vmandnot.mm v0, v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgeu.mask.nxv4i64.i64( + %0, + %1, + i64 %2, + %0, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv32.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgt.nxv1i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind 
{ +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv32i8( + , + , + , + , + i32); + +define 
@intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, 
v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i32( + , + , + i32); + +define 
@intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i32( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + 
+declare @llvm.riscv.vmsgt.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i64( + , + , + i32); + +define @intrinsic_vmsgt_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + declare @llvm.riscv.vmsgt.nxv1i8.i8( , i8, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgt-rv64.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgt.nxv1i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: 
vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv32i8( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i16( + %0, + %2, + %3, + 
%mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv16i16( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr 
zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv4i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; 
CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv8i32( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmslt.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmslt.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv1i64( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmslt.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmslt.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgt.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgt.nxv2i64( + , + , + i64); + +define @intrinsic_vmsgt_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgt.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgt.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmslt.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmslt.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgt.nxv2i64( + %1, + %2, + i64 %4) + %a = call 
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgt.mask.nxv2i64(
+    <vscale x 2 x i1> %0,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i64> %3,
+    <vscale x 2 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgt_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v12, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64> %1,
+    i64 %2)
+
+  ret <vscale x 4 x i1> %a
+}
+
+declare <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
+  <vscale x 4 x i1>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define <vscale x 4 x i1> @intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgt_mask_vv_nxv4i64_nxv4i64:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e64,m4,ta,mu
+; CHECK-NEXT:    vmslt.vv v0, v12, v8
+; CHECK-NEXT:    vsetvli a0, a0, e64,m4,tu,mu
+; CHECK-NEXT:    vmslt.vv v25, v16, v12, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsgt.nxv4i64(
+    <vscale x 4 x i64> %1,
+    <vscale x 4 x i64> %2,
+    i64 %4)
+  %a = call <vscale x 4 x i1> @llvm.riscv.vmsgt.mask.nxv4i64(
+    <vscale x 4 x i1> %0,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i64> %3,
+    <vscale x 4 x i1> %mask,
+    i64 %4)
+
+  ret <vscale x 4 x i1> %a
+}
+
 declare <vscale x 1 x i1> @llvm.riscv.vmsgt.nxv1i8.i8(
   <vscale x 1 x i8>,
   i8,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv32.ll
@@ -1,6 +1,942 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
 ; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
+  <vscale x 1 x i1>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i1> @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,mf8,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf8,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.nxv1i8(
+    <vscale x 1 x i8> %1,
+    <vscale x 1 x i8> %2,
+    i32 %4)
+  %a = call <vscale x 1 x i1> @llvm.riscv.vmsgtu.mask.nxv1i8(
+    <vscale x 1 x i1> %0,
+    <vscale x 1 x i8> %2,
+    <vscale x 1 x i8> %3,
+    <vscale x 1 x i1> %mask,
+    i32 %4)
+
+  ret <vscale x 1 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    jalr zero, 0(ra)
+entry:
+  %a = call <vscale x 2 x i1> @llvm.riscv.vmsgtu.nxv2i8(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i1> %a
+}
+
+declare <vscale x 2 x i1> @llvm.riscv.vmsgtu.mask.nxv2i8(
+  <vscale x 2 x i1>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i1> @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
+; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8:
+; CHECK:       # %bb.0: # %entry
+; CHECK-NEXT:    vmv1r.v v25, v0
+; CHECK-NEXT:    vsetvli a1, a0, e8,mf4,ta,mu
+; CHECK-NEXT:    vmsltu.vv v0, v9, v8
+; CHECK-NEXT:    vsetvli a0, a0, e8,mf4,tu,mu
+; CHECK-NEXT:    vmsltu.vv v25, v10, v9, v0.t
+; CHECK-NEXT:    vmv1r.v v0, v25
+;
CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv32i8( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv32i8( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv32i8( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv32i8( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i16( + , 
+ , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i16( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i16( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i16( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i16( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i32( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv 
v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i32( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i32( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i32( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i32( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i32( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i32( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i32( + %0, + %2, + %3, + %mask, 
+ i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i64( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i64( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i64( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i64( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i64( + , + , + i32); + +define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i32 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i64( + %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i64( + , + , + , + , + i32); + +define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i32 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i64( + %1, + %2, + i32 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i32 %4) + + ret %a +} + declare @llvm.riscv.vmsgtu.nxv1i8.i8( , i8, diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsgtu-rv64.ll @@ -1,6 +1,942 @@ ; NOTE: Assertions 
have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \ ; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vmsgtu.nxv1i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv1i8_nxv1i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i8_nxv1i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf8,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf8,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv2i8_nxv2i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i8_nxv2i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv4i8_nxv4i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i8_nxv4i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv8i8_nxv8i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: 
intrinsic_vmsgtu_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i8_nxv8i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv16i8_nxv16i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i8_nxv16i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv32i8( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv32i8_nxv32i8( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv32i8( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv32i8( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv32i8_nxv32i8: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e8,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e8,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv32i8( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv32i8( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i16( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv1i16_nxv1i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i16( + , + , + , + , + 
i64); + +define @intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i16_nxv1i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i16( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv2i16_nxv2i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i16( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i16_nxv2i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i16( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv4i16_nxv4i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i16( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i16_nxv4i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i16( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv8i16_nxv8i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i16( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i16_nxv8i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: 
vsetvli a0, a0, e16,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv16i16( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv16i16_nxv16i16( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv16i16( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv16i16( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv16i16_nxv16i16: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e16,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e16,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv16i16( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv16i16( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i32( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv1i32_nxv1i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i32( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i32_nxv1i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,mf2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,mf2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i32( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv2i32_nxv2i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i32( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i32_nxv2i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i32( + %0, + %2, + %3, + %mask, + i64 %4) + + 
ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i32( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv4i32_nxv4i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i32( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i32_nxv4i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv8i32( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv8i32_nxv8i32( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv8i32( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv8i32( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv8i32_nxv8i32: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e32,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e32,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv8i32( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv8i32( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv1i64( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv1i64_nxv1i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv1i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv1i64( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv1i64_nxv1i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m1,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v9, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m1,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v10, v9, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv1i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv1i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv2i64( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv2i64_nxv2i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; 
CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv2i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv2i64( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv2i64_nxv2i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m2,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v10, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m2,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v12, v10, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv2i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv2i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vmsgtu.nxv4i64( + , + , + i64); + +define @intrinsic_vmsgtu_vv_nxv4i64_nxv4i64( %0, %1, i64 %2) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vsetvli a0, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %a = call @llvm.riscv.vmsgtu.nxv4i64( + %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vmsgtu.mask.nxv4i64( + , + , + , + , + i64); + +define @intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64( %0, %1, %2, %3, i64 %4) nounwind { +; CHECK-LABEL: intrinsic_vmsgtu_mask_vv_nxv4i64_nxv4i64: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vmv1r.v v25, v0 +; CHECK-NEXT: vsetvli a1, a0, e64,m4,ta,mu +; CHECK-NEXT: vmsltu.vv v0, v12, v8 +; CHECK-NEXT: vsetvli a0, a0, e64,m4,tu,mu +; CHECK-NEXT: vmsltu.vv v25, v16, v12, v0.t +; CHECK-NEXT: vmv1r.v v0, v25 +; CHECK-NEXT: jalr zero, 0(ra) +entry: + %mask = call @llvm.riscv.vmsgtu.nxv4i64( + %1, + %2, + i64 %4) + %a = call @llvm.riscv.vmsgtu.mask.nxv4i64( + %0, + %2, + %3, + %mask, + i64 %4) + + ret %a +} + declare @llvm.riscv.vmsgtu.nxv1i8.i8( , i8,