diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -35,6 +35,11 @@
   return SDValue(N, 0);
 }]>;
 
+def DecImm : SDNodeXForm<imm, [{
+  return CurDAG->getTargetConstant(N->getSExtValue() - 1, SDLoc(N),
+                                   N->getValueType(0));
+}]>;
+
 //===----------------------------------------------------------------------===//
 // Utilities.
 //===----------------------------------------------------------------------===//
@@ -3080,6 +3085,71 @@
 defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgtu", "PseudoVMSGTU", AllIntegerVectors>;
 defm "" : VPatBinaryM_VX_VI<"int_riscv_vmsgt", "PseudoVMSGT", AllIntegerVectors>;
 
+// Match vmslt(u).vx intrinsics to vmsle(u).vi if the scalar is -15 to 16. This
+// avoids the user needing to know that there is no vmslt(u).vi instruction.
+// This is limited to vmslt(u).vx as there is no vmsge(u).vx intrinsic or
+// instruction.
+foreach vti = AllIntegerVectors in {
+  def : Pat<(vti.Mask (int_riscv_vmslt (vti.Vector vti.RegClass:$rs1),
+                                       (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                               (DecImm simm5_plus1:$rs2),
+                                                               (NoX0 GPR:$vl),
+                                                               vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmslt_mask (vti.Mask V0),
+                                            (vti.Vector vti.RegClass:$rs1),
+                                            (vti.Scalar simm5_plus1:$rs2),
+                                            (vti.Mask VR:$merge),
+                                            GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLE_VI_"#vti.LMul.MX#"_MASK")
+                                                     VR:$merge,
+                                                     vti.RegClass:$rs1,
+                                                     (DecImm simm5_plus1:$rs2),
+                                                     (vti.Mask V0),
+                                                     (NoX0 GPR:$vl),
+                                                     vti.SEW)>;
+
+  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar simm5_plus1:$rs2), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                                (DecImm simm5_plus1:$rs2),
+                                                                (NoX0 GPR:$vl),
+                                                                vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar simm5_plus1:$rs2),
+                                             (vti.Mask VR:$merge),
+                                             GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSLEU_VI_"#vti.LMul.MX#"_MASK")
+                                                     VR:$merge,
+                                                     vti.RegClass:$rs1,
+                                                     (DecImm simm5_plus1:$rs2),
+                                                     (vti.Mask V0),
+                                                     (NoX0 GPR:$vl),
+                                                     vti.SEW)>;
+
+  // Special cases to avoid matching vmsltu.vi 0 (always false) to
+  // vmsleu.vi -1 (always true). Instead match to vmsne.vv.
+  def : Pat<(vti.Mask (int_riscv_vmsltu (vti.Vector vti.RegClass:$rs1),
+                                        (vti.Scalar 0), GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX) vti.RegClass:$rs1,
+                                                               vti.RegClass:$rs1,
+                                                               (NoX0 GPR:$vl),
+                                                               vti.SEW)>;
+  def : Pat<(vti.Mask (int_riscv_vmsltu_mask (vti.Mask V0),
+                                             (vti.Vector vti.RegClass:$rs1),
+                                             (vti.Scalar 0),
+                                             (vti.Mask VR:$merge),
+                                             GPR:$vl)),
+            (!cast<Instruction>("PseudoVMSNE_VV_"#vti.LMul.MX#"_MASK")
+                                                     VR:$merge,
+                                                     vti.RegClass:$rs1,
+                                                     vti.RegClass:$rs1,
+                                                     (vti.Mask V0),
+                                                     (NoX0 GPR:$vl),
+                                                     vti.SEW)>;
+}
+
 //===----------------------------------------------------------------------===//
 // 12.9.
Vector Integer Min/Max Instructions //===----------------------------------------------------------------------===// diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv32.ll @@ -1259,3 +1259,423 @@ ret %a } + +define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16 + %a = call @llvm.riscv.vmslt.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14 + %a = call @llvm.riscv.vmslt.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12 + %a = call @llvm.riscv.vmslt.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10 + %a = call @llvm.riscv.vmslt.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8 + %a = call @llvm.riscv.vmslt.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define 
@intrinsic_vmslt_vi_nxv32i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6 + %a = call @llvm.riscv.vmslt.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4 + %a = call @llvm.riscv.vmslt.nxv1i16.i16( + %0, + i16 -3, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2 + %a = call @llvm.riscv.vmslt.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1 + %a = call @llvm.riscv.vmslt.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1 + %a = call @llvm.riscv.vmslt.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3 + %a = call @llvm.riscv.vmslt.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define 
@intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5 + %a = call @llvm.riscv.vmslt.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7 + %a = call @llvm.riscv.vmslt.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmslt.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11 + %a = call @llvm.riscv.vmslt.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i32 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmslt-rv64.ll @@ -1511,3 +1511,507 @@ ret %a } + +define @intrinsic_vmslt_vi_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16 + %a = call @llvm.riscv.vmslt.nxv1i8.i8( + %0, + i8 -15, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmslt_mask_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14 + %a = call @llvm.riscv.vmslt.nxv2i8.i8( + %0, + i8 -13, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -12 + %a = call @llvm.riscv.vmslt.nxv4i8.i8( + %0, + i8 -11, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -10 + %a = call @llvm.riscv.vmslt.nxv8i8.i8( + %0, + i8 -9, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -8 + %a = call @llvm.riscv.vmslt.nxv16i8.i8( + %0, + i8 -7, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -6 + %a = call @llvm.riscv.vmslt.nxv32i8.i8( + %0, + i8 -5, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -4 + %a = call @llvm.riscv.vmslt.nxv1i16.i16( + %0, + i16 -3, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -2 + %a = call @llvm.riscv.vmslt.nxv2i16.i16( + %0, + i16 -1, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -1 + %a = call @llvm.riscv.vmslt.nxv4i16.i16( + %0, + i16 0, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 1 + %a = call @llvm.riscv.vmslt.nxv8i16.i16( + %0, + i16 2, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 3 + %a = call @llvm.riscv.vmslt.nxv16i16.i16( + %0, + i16 4, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 5 + %a = call @llvm.riscv.vmslt.nxv1i32.i32( + %0, + i32 6, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmslt_mask_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 7 + %a = call @llvm.riscv.vmslt.nxv2i32.i32( + %0, + i32 8, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmslt.nxv4i32.i32( + %0, + i32 10, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 11 + %a = call @llvm.riscv.vmslt.nxv8i32.i32( + %0, + i32 12, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 13 + %a = call @llvm.riscv.vmslt.nxv1i64.i64( + %0, + i64 14, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, 15 + %a = call @llvm.riscv.vmslt.nxv2i64.i64( + %0, + i64 16, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmslt_vi_nxv4i64_i64( %0, i64 %1) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -15 + %a = call @llvm.riscv.vmslt.nxv4i64.i64( + %0, + i64 -14, + i64 %1) + + ret %a +} + +define @intrinsic_vmslt_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu +; CHECK: vmsle.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t + %a = call @llvm.riscv.vmslt.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i64 %3) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll @@ -1259,3 +1259,423 @@ ret %a } + +define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16 + %a = call @llvm.riscv.vmsltu.nxv1i8.i8( + %0, + i8 -15, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14 + %a = call @llvm.riscv.vmsltu.nxv2i8.i8( + %0, + i8 -13, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12 + %a = call @llvm.riscv.vmsltu.nxv4i8.i8( + %0, + i8 -11, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10 + %a = call @llvm.riscv.vmsltu.nxv8i8.i8( + %0, + i8 -9, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8 + %a = call @llvm.riscv.vmsltu.nxv16i8.i8( + %0, + i8 -7, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6 + %a = call @llvm.riscv.vmsltu.nxv32i8.i8( + %0, + i8 -5, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4 + %a = call @llvm.riscv.vmsltu.nxv1i16.i16( + %0, + i16 -3, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2 + %a = call @llvm.riscv.vmsltu.nxv2i16.i16( + %0, + i16 -1, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]] + %a = call @llvm.riscv.vmsltu.nxv4i16.i16( + %0, + i16 0, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1 + %a = call @llvm.riscv.vmsltu.nxv8i16.i16( + %0, + i16 2, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmsltu_mask_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3 + %a = call @llvm.riscv.vmsltu.nxv16i16.i16( + %0, + i16 4, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5 + %a = call @llvm.riscv.vmsltu.nxv1i32.i32( + %0, + i32 6, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7 + %a = call @llvm.riscv.vmsltu.nxv2i32.i32( + %0, + i32 8, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmsltu.nxv4i32.i32( + %0, + i32 10, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i32 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i32 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11 + %a = call @llvm.riscv.vmsltu.nxv8i32.i32( + %0, + i32 12, + i32 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i32 %3) + + ret %a +} diff --git 
a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll --- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll @@ -1511,3 +1511,507 @@ ret %a } + +define @intrinsic_vmsltu_vi_nxv1i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16 + %a = call @llvm.riscv.vmsltu.nxv1i8.i8( + %0, + i8 -15, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i8.i8( + %0, + %1, + i8 -14, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14 + %a = call @llvm.riscv.vmsltu.nxv2i8.i8( + %0, + i8 -13, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -13, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i8.i8( + %0, + %1, + i8 -12, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -12 + %a = call @llvm.riscv.vmsltu.nxv4i8.i8( + %0, + i8 -11, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -11, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i8.i8( + %0, + %1, + i8 -10, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -10 + %a = call @llvm.riscv.vmsltu.nxv8i8.i8( + %0, + i8 -9, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -9, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i8.i8( + %0, + %1, + i8 -8, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv16i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -8 + %a = call @llvm.riscv.vmsltu.nxv16i8.i8( + %0, + i8 -7, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv16i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -7, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv16i8.i8( + %0, + %1, + i8 -6, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv32i8_i8( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vmsltu_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -6 + %a = call @llvm.riscv.vmsltu.nxv32i8.i8( + %0, + i8 -5, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv32i8_i8( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv32i8_i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -5, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv32i8.i8( + %0, + %1, + i8 -4, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv1i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -4 + %a = call @llvm.riscv.vmsltu.nxv1i16.i16( + %0, + i16 -3, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -3, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i16.i16( + %0, + %1, + i16 -2, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -2 + %a = call @llvm.riscv.vmsltu.nxv2i16.i16( + %0, + i16 -1, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,tu,mu +; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]], v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i16.i16( + %0, + %1, + i16 0, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vmsne.vv {{v[0-9]+}}, [[REG:v[0-9]+]], [[REG]] + %a = call @llvm.riscv.vmsltu.nxv4i16.i16( + %0, + i16 0, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 0, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i16.i16( + %0, + %1, + i16 1, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 1 + %a = call @llvm.riscv.vmsltu.nxv8i16.i16( + %0, + i16 2, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 2, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i16.i16( + %0, + %1, + i16 3, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv16i16_i16( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 3 + %a = call @llvm.riscv.vmsltu.nxv16i16.i16( + %0, + i16 4, + i64 %1) + + ret %a +} + +define 
@intrinsic_vmsltu_mask_vi_nxv16i16_i16( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv16i16_i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 4, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv16i16.i16( + %0, + %1, + i16 5, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv1i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 5 + %a = call @llvm.riscv.vmsltu.nxv1i32.i32( + %0, + i32 6, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 6, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv1i32.i32( + %0, + %1, + i32 7, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 7 + %a = call @llvm.riscv.vmsltu.nxv2i32.i32( + %0, + i32 8, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 8, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i32.i32( + %0, + %1, + i32 9, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 9 + %a = call @llvm.riscv.vmsltu.nxv4i32.i32( + %0, + i32 10, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 10, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i32.i32( + %0, + %1, + i32 11, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv8i32_i32( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 11 + %a = call @llvm.riscv.vmsltu.nxv8i32.i32( + %0, + i32 12, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv8i32_i32( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv8i32_i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 12, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv8i32.i32( + %0, + %1, + i32 13, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv1i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 13 + %a = call @llvm.riscv.vmsltu.nxv1i64.i64( + %0, + i64 14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv1i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv1i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 14, v0.t + %a = call 
@llvm.riscv.vmsltu.mask.nxv1i64.i64( + %0, + %1, + i64 15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv2i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, 15 + %a = call @llvm.riscv.vmsltu.nxv2i64.i64( + %0, + i64 16, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv2i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv2i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -16, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv2i64.i64( + %0, + %1, + i64 -15, + %2, + i64 %3) + + ret %a +} + +define @intrinsic_vmsltu_vi_nxv4i64_i64( %0, i64 %1) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -15 + %a = call @llvm.riscv.vmsltu.nxv4i64.i64( + %0, + i64 -14, + i64 %1) + + ret %a +} + +define @intrinsic_vmsltu_mask_vi_nxv4i64_i64( %0, %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vmsltu_mask_vi_nxv4i64_i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,tu,mu +; CHECK: vmsleu.vi {{v[0-9]+}}, {{v[0-9]+}}, -14, v0.t + %a = call @llvm.riscv.vmsltu.mask.nxv4i64.i64( + %0, + %1, + i64 -13, + %2, + i64 %3) + + ret %a +}
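
Note on the lowering exercised by the patterns and tests above: a vmslt(u).vx intrinsic with an immediate c in [-15, 16] is selected as vmsle(u).vi with c-1 (computed by the DecImm SDNodeXForm), except that an unsigned compare against 0 is always false and must not become vmsleu.vi -1 (which is always true), so it is matched to vmsne.vv vs, vs instead. The standalone C++ sketch below only illustrates that decision logic; the helper name lowerVmsltImm, the Lowering struct, and the returned mnemonic strings are hypothetical and not part of any LLVM API.

#include <cstdint>
#include <iostream>
#include <string>
#include <utility>

// Hypothetical helper mirroring the TableGen patterns: choose the compare
// actually emitted for "x < Imm" when Imm is in [-15, 16].
struct Lowering {
  std::string Mnemonic; // chosen instruction
  int64_t Imm;          // immediate for the .vi forms (ignored for vmsne.vv)
};

static Lowering lowerVmsltImm(int64_t Imm, bool IsUnsigned) {
  // x <u 0 is always false; folding it to vmsleu.vi with Imm-1 == -1 would be
  // always true, so the patterns use vmsne.vv vs, vs (always false) instead.
  if (IsUnsigned && Imm == 0)
    return {"vmsne.vv", 0};
  // Otherwise x < Imm is equivalent to x <= Imm-1, which is what the DecImm
  // transform computes for the vmsle(u).vi immediate operand.
  return {IsUnsigned ? "vmsleu.vi" : "vmsle.vi", Imm - 1};
}

int main() {
  const std::pair<int64_t, bool> Tests[] = {
      {-15, false}, {16, false}, {0, true}, {1, true}};
  for (const auto &[Imm, IsUnsigned] : Tests) {
    Lowering L = lowerVmsltImm(Imm, IsUnsigned);
    std::cout << (IsUnsigned ? "vmsltu.vx imm=" : "vmslt.vx imm=") << Imm
              << " -> " << L.Mnemonic;
    if (L.Mnemonic != "vmsne.vv")
      std::cout << " " << L.Imm;
    std::cout << "\n";
  }
  return 0;
}

Running the sketch prints mnemonic/immediate pairs consistent with the CHECK lines in the tests above, e.g. vmslt.vx with -15 maps to vmsle.vi -16, and vmsltu.vx with 0 maps to vmsne.vv.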