diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -77,6 +77,10 @@
   }];
 }
 
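+// Immediates in [-15, 16] excluding 0; the decremented value fits in simm5.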
+def simm5_plus1_nonzero : ImmLeaf<XLenVT,
+  [{return Imm != 0 && ((isInt<5>(Imm) && Imm != -16) || Imm == 16);}]>;
+
 //===----------------------------------------------------------------------===//
 // Scheduling definitions.
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -3700,23 +3700,27 @@
   }
 }
 
-multiclass VPatCompare_VI<string intrinsic, string inst> {
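+// Match a compare intrinsic with a scalar immediate to the .vi form of
+// inst, with the immediate decremented by one (e.g. x < 5 becomes x <= 4).
+// ImmType restricts which immediates are eligible for the rewrite.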
+multiclass VPatCompare_VI<string intrinsic, string inst,
+                          ImmLeaf ImmType = simm5_plus1> {
   foreach vti = AllIntegerVectors in {
     defvar Intr = !cast<Intrinsic>(intrinsic);
     defvar Pseudo = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX);
     def : Pat<(vti.Mask (Intr (vti.Vector vti.RegClass:$rs1),
-                              (vti.Scalar simm5_plus1:$rs2),
+                              (vti.Scalar ImmType:$rs2),
                               VLOpFrag)),
-              (Pseudo vti.RegClass:$rs1, (DecImm simm5_plus1:$rs2),
+              (Pseudo vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                       GPR:$vl, vti.Log2SEW)>;
     defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
     defvar PseudoMask = !cast<Instruction>(inst#"_VI_"#vti.LMul.MX#"_MASK");
     def : Pat<(vti.Mask (IntrMask (vti.Mask VR:$merge),
                                   (vti.Vector vti.RegClass:$rs1),
-                                  (vti.Scalar simm5_plus1:$rs2),
+                                  (vti.Scalar ImmType:$rs2),
                                   (vti.Mask V0),
                                   VLOpFrag)),
-              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm simm5_plus1:$rs2),
+              (PseudoMask VR:$merge, vti.RegClass:$rs1, (DecImm ImmType:$rs2),
                           (vti.Mask V0), GPR:$vl, vti.Log2SEW)>;
   }
 }
@@ -4521,15 +4525,14 @@
 // avoids the user needing to know that there is no vmslt(u).vi instruction.
 // Similar for vmsge(u).vx intrinsics using vmsgt(u).vi.
 defm : VPatCompare_VI<"int_riscv_vmslt", "PseudoVMSLE">;
-defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU">;
-// Special cases to avoid matching vmsltu.vi 0 (always false) to
-// vmsleu.vi -1 (always true). Instead match to vmsne.vv.
-defm : VPatCompareUnsignedZero<"int_riscv_vmsltu", "PseudoVMSNE">;
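+// Exclude 0: decrementing would turn vmsltu.vi 0 (always false) into
+// vmsleu.vi -1 (always true). 0 now selects vmsltu.vx with x0 instead.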
+defm : VPatCompare_VI<"int_riscv_vmsltu", "PseudoVMSLEU", simm5_plus1_nonzero>;
 
 defm : VPatCompare_VI<"int_riscv_vmsge", "PseudoVMSGT">;
-defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU">;
-// Special cases to avoid matching vmsgeu.vi 0 (always true) to
-// vmsgtu.vi -1 (always false). Instead match to vmsne.vv.
+defm : VPatCompare_VI<"int_riscv_vmsgeu", "PseudoVMSGTU", simm5_plus1_nonzero>;
+// Special case: match vmsgeu.vi 0 (always true) to vmseq.vv.
+// FIXME: We could match this to vmset.m or vmset.m+vmand.mm.
 defm : VPatCompareUnsignedZero<"int_riscv_vmsgeu", "PseudoVMSEQ">;
 
 //===----------------------------------------------------------------------===//
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv32.ll
@@ -2085,7 +2085,7 @@
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vv v10, v8, v8, v0.t
+; CHECK-NEXT:    vmsltu.vx v10, v8, zero, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -2103,7 +2103,7 @@
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v8
+; CHECK-NEXT:    vmsltu.vx v0, v8, zero
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(
diff --git a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vmsltu-rv64.ll
@@ -2049,7 +2049,7 @@
 ; CHECK-NEXT:    vmv1r.v v10, v0
 ; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
 ; CHECK-NEXT:    vmv1r.v v0, v9
-; CHECK-NEXT:    vmsne.vv v10, v8, v8, v0.t
+; CHECK-NEXT:    vmsltu.vx v10, v8, zero, v0.t
 ; CHECK-NEXT:    vmv1r.v v0, v10
 ; CHECK-NEXT:    ret
 entry:
@@ -2067,7 +2067,7 @@
 ; CHECK-LABEL: intrinsic_vmsltu_vi_nxv4i16_i16:
 ; CHECK:       # %bb.0: # %entry
 ; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
-; CHECK-NEXT:    vmsne.vv v0, v8, v8
+; CHECK-NEXT:    vmsltu.vx v0, v8, zero
 ; CHECK-NEXT:    ret
 entry:
   %a = call <vscale x 4 x i1> @llvm.riscv.vmsltu.nxv4i16.i16(