diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -327,6 +327,16 @@
     : RVVSignedBinBuiltinSet,
       RVVUnsignedBinBuiltinSet;
 
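+// 64-bit element ("l") binary builtins used by vclmul/vclmulh; emits .vv and
+// .vx forms for both the signed and unsigned vector types.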
+multiclass RVVInt64BinBuiltinSet
+    : RVVOutOp1BuiltinSet<NAME, "l",
+                          [["vv", "v", "vvv"],
+                           ["vx", "v", "vve"]]>,
+      RVVOutOp1BuiltinSet<NAME, "l",
+                          [["vv", "Uv", "UvUvUv"],
+                           ["vx", "Uv", "UvUvUe"]]>;
+
 multiclass RVVSlideOneBuiltinSet
     : RVVOutOp1BuiltinSet<NAME, "csil",
                           [["vx", "v", "vve"],
@@ -2390,3 +2400,81 @@
     }
   }
 }
+
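+// Common builtin set for the vector crypto instructions. HasVV/HasVS select
+// the .vv/.vs forms, HasSigned also emits signed-vector prototypes, and
+// IsZvkb drops the form suffix from the builtin name (used by vbrev8/vrev8).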
+multiclass RVVOutBuiltinSetP<bit HasVV = 1, bit HasVS = 1, bit HasSigned = 0,
+                             string type_range = "i", bit IsZvkb = 0> {
+  if HasVV then {
+    defvar suffix = !if(!or(HasVS, !eq(NAME, "vsm4r")), "vv", "v");
+    defm "" : RVVOutBuiltinSet<NAME # !if(IsZvkb, "", "_" # suffix), type_range,
+                               !listconcat(!if(HasSigned,
+                                               [[suffix, "v", !if(!eq(type_range, "i"),
+                                                                  "vvv", "vv")]],
+                                               []),
+                                           [[suffix, "Uv", !if(!eq(type_range, "i"),
+                                                               "UvUvUv", "UvUv")]])>;
+  }
+
+  if HasVS then {
+    defm "" : RVVOutBuiltinSet<NAME # "_vs", type_range,
+                               !listconcat(!if(HasSigned,
+                                               [["vs", "v", "vvv"]],
+                                               []),
+                                           [["vs", "Uv", "UvUvUv"]])>;
+  }
+}
+
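+// Builtins taking vd, vs2 and a third operand: either another vector source
+// (.vv, e.g. vsha2ch) or a scalar immediate (.vi, e.g. vaeskf1).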
+multiclass RVVOutOp1BuiltinSetP<bit IsVV = 1, string type_range = "i"> {
+  defvar suffix = !if(IsVV, "vv", "vi");
+  defvar prototype = !if(IsVV, "UvUvUvUv", "UvUvUvUe");
+  defm "" : RVVBuiltinSet<NAME, type_range, [[suffix, "Uv", prototype]], [-1, 2]>;
+}
+
+let UnMaskedPolicyScheme = HasPassthruOperand in {
+  // zvkb
+  defm vandn : RVVIntBinBuiltinSet;
+  defm vbrev8 : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vrev8 : RVVOutBuiltinSetP<1, 0, 1, "csil", 1>;
+  defm vclmul : RVVInt64BinBuiltinSet;
+  defm vclmulh : RVVInt64BinBuiltinSet;
+  defm vrol : RVVIntBinBuiltinSet;
+  defm vror : RVVIntBinBuiltinSet;
+}
+
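+// These builtins take vd (or the merge value for the _tu forms) as their
+// first operand and are lowered with an explicit policy operand (the trailing
+// i64 argument in the generated intrinsic calls).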
+let UnMaskedPolicyScheme = HasPolicyOperand in {
+  // zvkns
+  let HasMasked = false in {
+    defm vaesdf : RVVOutBuiltinSetP;
+    defm vaesdm : RVVOutBuiltinSetP;
+    defm vaesef : RVVOutBuiltinSetP;
+    defm vaesem : RVVOutBuiltinSetP;
+    defm vaeskf1 : RVVOutOp1BuiltinSetP<0>;
+    defm vaeskf2 : RVVOutOp1BuiltinSetP<0>;
+    defm vaesz : RVVOutBuiltinSetP<0>;
+  }
+
+  let HasMasked = false in {
+    // zvkg
+    defm vghmac : RVVOutOp1BuiltinSetP;
+
+    // zvksed
+    defm vsm4k : RVVOutOp1BuiltinSetP<0>;
+    defm vsm4r : RVVOutBuiltinSetP<1, 0>;
+
+    // zvksh
+    defm vsm3c : RVVOutOp1BuiltinSetP<0>;
+    defm vsm3me : RVVOutOp1BuiltinSetP;
+
+    // zvknha or zvknhb
+    defm vsha2ch : RVVOutOp1BuiltinSetP<1, "il">;
+    defm vsha2cl : RVVOutOp1BuiltinSetP<1, "il">;
+    defm vsha2ms : RVVOutOp1BuiltinSetP<1, "il">;
+  }
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdf.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdf.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdf.c
@@ -0,0 +1,275 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vv_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vv.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vv_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vv_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vv.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vv_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vv_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vv.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vv_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vv_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vv.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vv_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vv_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vv_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vv.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vv_u32m8_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vs_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vs_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vs_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdf.vs.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdf_vs_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesdf_vs_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vs_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vs_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vs_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdf.vs.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdf_vs_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesdf_vs_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vs_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vs_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vs_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdf.vs.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdf_vs_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesdf_vs_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vs_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vs_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vs_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdf.vs.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdf_vs_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesdf_vs_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vs_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vs_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vs_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdf_vs_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdf.vs.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdf_vs_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesdf_vs_u32m8_tu(merge, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdm.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdm.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesdm.c
@@ -0,0 +1,275 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vv_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vv.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vv_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vv_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vv.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vv_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vv_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vv.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vv_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vv_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vv.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vv_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vv_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vv_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vv.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vv_u32m8_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vs_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vs_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vs_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesdm.vs.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesdm_vs_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesdm_vs_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vs_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vs_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vs_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesdm.vs.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesdm_vs_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesdm_vs_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vs_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vs_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vs_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesdm.vs.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesdm_vs_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesdm_vs_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vs_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vs_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vs_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesdm.vs.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesdm_vs_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesdm_vs_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vs_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vs_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vs_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesdm_vs_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesdm.vs.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesdm_vs_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesdm_vs_u32m8_tu(merge, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesef.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesef.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesef.c
@@ -0,0 +1,275 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vv_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vv.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vv_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vv_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vv.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vv_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vv_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vv.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vv_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vv_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vv.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vv_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vv_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vv_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vv.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vv_u32m8_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vs_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vs_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vs_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesef.vs.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesef_vs_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesef_vs_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vs_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vs_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vs_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesef.vs.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesef_vs_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesef_vs_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vs_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vs_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vs_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesef.vs.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesef_vs_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesef_vs_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vs_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vs_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vs_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesef.vs.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesef_vs_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesef_vs_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vs_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vs_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vs_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesef_vs_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesef.vs.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesef_vs_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesef_vs_u32m8_tu(merge, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesem.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesem.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesem.c
@@ -0,0 +1,275 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vv_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vv.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vv_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vv_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vv.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vv_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vv_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vv.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vv_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vv_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vv.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vv_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vv_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vv_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vv.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vv_u32m8_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vs_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vs_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vs_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesem.vs.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesem_vs_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesem_vs_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vs_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vs_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vs_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesem.vs.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesem_vs_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesem_vs_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vs_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vs_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vs_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesem.vs.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesem_vs_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesem_vs_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vs_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vs_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vs_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesem.vs.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesem_vs_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesem_vs_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vs_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vs_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vs_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesem_vs_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesem.vs.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesem_vs_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesem_vs_u32m8_tu(merge, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf1.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf1.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf1.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf1.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf1_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vaeskf1_vi_u32mf2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf1.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf1_vi_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vaeskf1_vi_u32mf2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf1.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf1_vi_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vaeskf1_vi_u32mf2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf1.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf1_vi_u32m1(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vaeskf1_vi_u32m1(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf1.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf1_vi_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vaeskf1_vi_u32m1_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf1.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf1_vi_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, size_t vl) {
+  return vaeskf1_vi_u32m1_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf1_vi_u32m2(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vaeskf1_vi_u32m2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf1_vi_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vaeskf1_vi_u32m2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf1.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf1_vi_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, size_t vl) {
+  return vaeskf1_vi_u32m2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf1_vi_u32m4(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vaeskf1_vi_u32m4(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf1_vi_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vaeskf1_vi_u32m4_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf1.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf1_vi_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, size_t vl) {
+  return vaeskf1_vi_u32m4_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf1_vi_u32m8(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vaeskf1_vi_u32m8(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf1_vi_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vaeskf1_vi_u32m8_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf1_vi_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf1.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf1_vi_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, size_t vl) {
+  return vaeskf1_vi_u32m8_tu(merge, op1, 8, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf2.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf2.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaeskf2.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf2.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf2_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vaeskf2_vi_u32mf2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf2.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf2_vi_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vaeskf2_vi_u32mf2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaeskf2.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaeskf2_vi_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vaeskf2_vi_u32mf2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf2.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf2_vi_u32m1(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vaeskf2_vi_u32m1(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf2.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf2_vi_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vaeskf2_vi_u32m1_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaeskf2.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaeskf2_vi_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, size_t vl) {
+  return vaeskf2_vi_u32m1_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf2.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf2_vi_u32m2(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vaeskf2_vi_u32m2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf2.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf2_vi_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vaeskf2_vi_u32m2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaeskf2.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaeskf2_vi_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, size_t vl) {
+  return vaeskf2_vi_u32m2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf2.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf2_vi_u32m4(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vaeskf2_vi_u32m4(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf2.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf2_vi_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vaeskf2_vi_u32m4_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaeskf2.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaeskf2_vi_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, size_t vl) {
+  return vaeskf2_vi_u32m4_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf2.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf2_vi_u32m8(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vaeskf2_vi_u32m8(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf2.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf2_vi_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vaeskf2_vi_u32m8_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaeskf2_vi_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaeskf2.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaeskf2_vi_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, size_t vl) {
+  return vaeskf2_vi_u32m8_tu(merge, op1, 8, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesz.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesz.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vaesz.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesz.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesz_vs_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesz_vs_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesz.vs.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesz_vs_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vaesz_vs_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vaesz.vs.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vaesz_vs_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vaesz_vs_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesz.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesz_vs_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesz_vs_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesz.vs.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesz_vs_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vaesz_vs_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vaesz.vs.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vaesz_vs_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vaesz_vs_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesz.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesz_vs_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesz_vs_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesz.vs.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesz_vs_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vaesz_vs_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vaesz.vs.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vaesz_vs_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vaesz_vs_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesz.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesz_vs_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesz_vs_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesz.vs.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesz_vs_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vaesz_vs_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vaesz.vs.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vaesz_vs_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vaesz_vs_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesz.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesz_vs_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesz_vs_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesz.vs.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesz_vs_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vaesz_vs_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vaesz_vs_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vaesz.vs.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vaesz_vs_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vaesz_vs_u32m8_tu(merge, vs2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vandn.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vandn.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vandn.c
@@ -0,0 +1,6341 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vandn_vv_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vandn_vv_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vandn_vv_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vandn_vv_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vandn_vv_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vandn_vv_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_ta(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_tu(vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_tama(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vv_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vandn_vv_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_ta(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_tu(vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_tama(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vv_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vandn_vv_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_ta(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_tu(vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_tama(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vv_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vandn_vv_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_ta(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_tu(vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_tama(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vv_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vandn_vv_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_ta(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_tu(vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_tama(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vv_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vandn_vv_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_ta(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_tu(vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_tama(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vv_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vandn_vv_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_ta(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_tu(vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_tama(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vv_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vandn_vv_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_ta(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_tu(vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_tama(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vv_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vandn_vv_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_ta(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_tu(vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_tama(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vv_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vandn_vv_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_ta(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_tu(vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_tama(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vv_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vandn_vv_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_ta(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_tu(vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_tama(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vv_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vandn_vv_i32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_ta(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_tu(vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_tama(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vv_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vandn_vv_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_ta(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vandn_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_ta(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_tu(vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_tama(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vv_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vandn_vv_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_ta(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_tu(vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_tama(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vv_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vandn_vv_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_ta(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_tu(vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_tama(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vv_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vandn_vv_i64m8_tumu(mask, merge, op1, op2, vl);
+}
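+
+// Note on the suffixed variants above and below: the unsuffixed, _ta, and
+// _tama forms pass undef as the passthrough operand; the _tu, _tuma, _tumu,
+// _tamu, and _m forms take an explicit merge operand; and the masked builtins
+// lower with policy immediates 0 (_m/_tumu), 1 (_tamu), 2 (_tuma), 3 (_tama).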
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vandn_vv_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vandn_vv_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vandn_vv_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vandn_vv_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vandn_vv_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vandn_vv_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_tama(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vandn_vv_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_tama(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vandn_vv_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_tama(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vandn_vv_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_tama(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vandn_vv_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_tama(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vandn_vv_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_tama(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vandn_vv_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
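+// Note: in the masked intrinsic calls checked above, the trailing i64 operand
+// carries the tail/mask policy. As the checks in this file show, _tama lowers
+// to 3, _tamu to 1, _tuma to 2, and _tumu (as well as plain _m) to 0, which
+// matches the usual encoding of bit 0 = tail agnostic, bit 1 = mask agnostic.
+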
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_tama(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vandn_vv_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_tama(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vandn_vv_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_tama(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vandn_vv_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_tama(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vandn_vv_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_tama(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vandn_vv_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_tama(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vandn_vv_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vandn_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_tama(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vandn_vv_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_tama(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vandn_vv_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_tama(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vv_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vandn_vv_u64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vandn_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_ta(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vandn_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_ta(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vandn_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_ta(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vandn_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_ta(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vandn_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vandn_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_ta(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_tu(vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_tama(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vandn_vx_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_ta(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_tu(vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_tama(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vandn_vx_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_ta(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_tu(vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_tama(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vandn_vx_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_ta(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_tu(vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_tama(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vandn_vx_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vandn_vx_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_ta(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_tu(vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_tama(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vandn_vx_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_ta(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_tu(vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_tama(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vandn_vx_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_ta(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_tu(vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_tama(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vandn_vx_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_ta(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_tu(vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_tama(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vandn_vx_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vandn_vx_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_ta(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_tu(vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_tama(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vandn_vx_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_ta(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_tu(vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_tama(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vandn_vx_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_ta(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_tu(vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_tama(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vandn_vx_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_ta(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_tu(vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_tama(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vandn_vx_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vandn_vx_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_ta(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vandn_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_ta(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_tu(vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_tama(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vandn_vx_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_ta(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_tu(vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_tama(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vandn_vx_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_ta(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_tu(vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_tama(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vandn_vx_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vandn_vx_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vandn.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vandn_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vandn.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vandn_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vandn.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vandn_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vandn.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vandn_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vandn.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vandn_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vandn.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vandn_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_ta(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_tama(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vandn.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vandn_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_ta(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_tama(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vandn.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vandn_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_ta(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_tama(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vandn.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vandn_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_ta(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_tama(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vandn.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vandn_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vandn_vx_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_ta(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_tama(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vandn.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vandn_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_ta(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_tama(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vandn.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vandn_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_ta(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_tama(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vandn.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vandn_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_ta(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_tama(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vandn.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vandn_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vandn_vx_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_ta(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_tama(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vandn.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vandn_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_ta(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_tama(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vandn.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vandn_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_ta(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_tama(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vandn.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vandn_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_ta(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_tama(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vandn.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vandn_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vandn_vx_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_ta(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vandn.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vandn_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_ta(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_tama(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vandn.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vandn_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_ta(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_tama(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vandn.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vandn_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_ta(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_tama(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vandn_vx_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vandn.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vandn_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vandn_vx_u64m8_tumu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vbrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vbrev8.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vbrev8.c
@@ -0,0 +1,3173 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8(vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_ta(vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_tu(vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_tama(vbool64_t mask, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vbrev8_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vbrev8_v_i8mf8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4(vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_ta(vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_tu(vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_tama(vbool32_t mask, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vbrev8_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vbrev8_v_i8mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2(vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_ta(vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_tu(vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_tama(vbool16_t mask, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vbrev8_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vbrev8_v_i8mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4(vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_ta(vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_tu(vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_tama(vbool64_t mask, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vbrev8_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vbrev8_v_i16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2(vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_ta(vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_tu(vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_tama(vbool32_t mask, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vbrev8_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vbrev8_v_i16mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2(vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_ta(vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vbrev8_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vbrev8_v_i32mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1(vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_ta(vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_tu(vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_tama(vbool8_t mask, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vbrev8_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vbrev8_v_i8m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2(vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_ta(vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_tu(vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_tama(vbool4_t mask, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vbrev8_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vbrev8_v_i8m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4(vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_ta(vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_tu(vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_tama(vbool2_t mask, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vbrev8_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vbrev8_v_i8m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8(vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_ta(vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_tu(vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_tama(vbool1_t mask, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vbrev8_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vbrev8_v_i8m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1(vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_ta(vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_tu(vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_tama(vbool16_t mask, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vbrev8_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vbrev8_v_i16m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2(vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_ta(vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_tu(vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_tama(vbool8_t mask, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vbrev8_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vbrev8_v_i16m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4(vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_ta(vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_tu(vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_tama(vbool4_t mask, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vbrev8_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vbrev8_v_i16m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8(vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_ta(vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_tu(vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_tama(vbool2_t mask, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vbrev8_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vbrev8_v_i16m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1(vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_ta(vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_tu(vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_tama(vbool32_t mask, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vbrev8_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vbrev8_v_i32m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2(vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_ta(vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_tu(vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_tama(vbool16_t mask, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vbrev8_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vbrev8_v_i32m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4(vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_ta(vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_tu(vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_tama(vbool8_t mask, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vbrev8_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vbrev8_v_i32m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8(vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_ta(vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_tu(vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_tama(vbool4_t mask, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vbrev8_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vbrev8_v_i32m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1(vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_ta(vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_tu(vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_tama(vbool64_t mask, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vbrev8_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vbrev8_v_i64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2(vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_ta(vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_tu(vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_tama(vbool32_t mask, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vbrev8_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vbrev8_v_i64m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4(vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_ta(vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_tu(vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_tama(vbool16_t mask, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vbrev8_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vbrev8_v_i64m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8(vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_ta(vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_tu(vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_tama(vbool8_t mask, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vbrev8_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vbrev8_v_i64m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8(vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_ta(vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_tama(vbool64_t mask, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vbrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vbrev8_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vbrev8_v_u8mf8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4(vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_ta(vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_tama(vbool32_t mask, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vbrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vbrev8_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vbrev8_v_u8mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2(vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_ta(vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_tama(vbool16_t mask, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vbrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vbrev8_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vbrev8_v_u8mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4(vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_ta(vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_tama(vbool64_t mask, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vbrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vbrev8_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vbrev8_v_u16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2(vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_ta(vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_tama(vbool32_t mask, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vbrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vbrev8_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vbrev8_v_u16mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2(vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_ta(vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vbrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vbrev8_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vbrev8_v_u32mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1(vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_ta(vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_tu(vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_tama(vbool8_t mask, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vbrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vbrev8_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vbrev8_v_u8m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2(vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_ta(vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_tu(vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_tama(vbool4_t mask, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vbrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vbrev8_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vbrev8_v_u8m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4(vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_ta(vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_tu(vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_tama(vbool2_t mask, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vbrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vbrev8_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vbrev8_v_u8m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8(vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_ta(vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_tu(vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_tama(vbool1_t mask, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vbrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vbrev8_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vbrev8_v_u8m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1(vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_ta(vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_tu(vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_tama(vbool16_t mask, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vbrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vbrev8_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vbrev8_v_u16m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2(vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_ta(vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_tu(vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_tama(vbool8_t mask, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vbrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vbrev8_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vbrev8_v_u16m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4(vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_ta(vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_tu(vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_tama(vbool4_t mask, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vbrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vbrev8_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vbrev8_v_u16m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8(vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_ta(vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_tu(vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_tama(vbool2_t mask, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vbrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vbrev8_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vbrev8_v_u16m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1(vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_ta(vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_tu(vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_tama(vbool32_t mask, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vbrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vbrev8_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vbrev8_v_u32m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2(vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_ta(vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_tu(vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_tama(vbool16_t mask, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vbrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vbrev8_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vbrev8_v_u32m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4(vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_ta(vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_tu(vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_tama(vbool8_t mask, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vbrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vbrev8_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vbrev8_v_u32m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8(vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_ta(vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_tu(vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_tama(vbool4_t mask, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vbrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vbrev8_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vbrev8_v_u32m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1(vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_ta(vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_tu(vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_tama(vbool64_t mask, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vbrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vbrev8_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vbrev8_v_u64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2(vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_ta(vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_tu(vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_tama(vbool32_t mask, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vbrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vbrev8_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vbrev8_v_u64m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4(vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_ta(vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_tu(vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_tama(vbool16_t mask, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vbrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vbrev8_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vbrev8_v_u64m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8(vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_ta(vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_tu(vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_tama(vbool8_t mask, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vbrev8_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vbrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vbrev8_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vbrev8_v_u64m8_tumu(mask, merge, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmul.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmul.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmul.c
@@ -0,0 +1,1161 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
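+
+// vclmul computes the low 64 bits of the carry-less (polynomial) product of
+// its 64-bit element operands (Zvkb). The tests below exercise the unmasked,
+// masked (_m), and tail/mask policy (_ta/_tu/_tama/_tamu/_tuma/_tumu) variants.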
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_ta(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmul_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_ta(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_tu(vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_tama(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vv_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmul_vv_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_ta(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_tu(vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_tama(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vv_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmul_vv_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_ta(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_tu(vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_tama(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vv_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmul_vv_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmul_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_tama(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmul_vv_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_tama(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmul_vv_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_tama(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vv_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmul_vv_u64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_ta(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmul_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_ta(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_tu(vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_tama(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmul_vx_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_ta(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_tu(vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_tama(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmul_vx_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_ta(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_tu(vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_tama(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmul_vx_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmul_vx_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_ta(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmul.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmul_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_ta(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_tama(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmul.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmul_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_ta(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_tama(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmul.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmul_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_ta(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_tama(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmul_vx_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmul.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmul_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmul_vx_u64m8_tumu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmulh.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmulh.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vclmulh.c
@@ -0,0 +1,1157 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
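+
+// Note on the variants exercised below: the _tu/_ta/_tuma/_tumu/_tama/_tamu
+// suffixes select the tail/mask policy form of each intrinsic.  As the CHECK
+// lines show, the trailing i64 immediate of the masked calls encodes that
+// policy (tumu = 0, tamu = 1, tuma = 2, tama = 3); the unmasked base and _ta
+// forms pass undef as the passthru operand, while the _tu and _m forms pass
+// the [[MERGE]] argument through.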
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_ta(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vclmulh_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_ta(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_tu(vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_tama(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vv_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vclmulh_vv_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_ta(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_tu(vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_tama(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vv_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vclmulh_vv_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_ta(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_tu(vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_tama(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vv_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vclmulh_vv_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vclmulh_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_tama(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vclmulh_vv_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_tama(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vclmulh_vv_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_tama(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vv_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vclmulh_vv_u64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_ta(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vclmulh_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_ta(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_tu(vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_tama(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vclmulh_vx_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_ta(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_tu(vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_tama(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vclmulh_vx_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_ta(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_tu(vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_tama(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vclmulh_vx_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vclmulh_vx_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_ta(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vclmulh.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vclmulh_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_ta(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_tama(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vclmulh.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vclmulh_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_ta(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_tama(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vclmulh.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vclmulh_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_ta(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_tama(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vclmulh_vx_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vclmulh.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vclmulh_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vclmulh_vx_u64m8_tumu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vghmac.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vghmac.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vghmac.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vghmac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vghmac_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vghmac_vv_u32mf2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vghmac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vghmac_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vghmac_vv_u32mf2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vghmac.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vghmac_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vghmac_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vghmac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vghmac_vv_u32m1(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vghmac_vv_u32m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vghmac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vghmac_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vghmac_vv_u32m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vghmac.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vghmac_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vghmac_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vghmac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vghmac_vv_u32m2(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vghmac_vv_u32m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vghmac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vghmac_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vghmac_vv_u32m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vghmac.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vghmac_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vghmac_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vghmac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vghmac_vv_u32m4(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vghmac_vv_u32m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vghmac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vghmac_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vghmac_vv_u32m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vghmac.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vghmac_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vghmac_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghmac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vghmac_vv_u32m8(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vghmac_vv_u32m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghmac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vghmac_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vghmac_vv_u32m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vghmac_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vghmac.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vghmac_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vghmac_vv_u32m8_tu(merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrev8.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrev8.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrev8.c
@@ -0,0 +1,3173 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8(vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_ta(vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_tu(vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_tama(vbool64_t mask, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrev8_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t src, size_t vl) {
+  return vrev8_v_i8mf8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4(vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_ta(vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_tu(vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_tama(vbool32_t mask, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrev8_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t src, size_t vl) {
+  return vrev8_v_i8mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2(vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_ta(vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_tu(vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_tama(vbool16_t mask, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrev8_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t src, size_t vl) {
+  return vrev8_v_i8mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4(vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_ta(vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_tu(vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_tama(vbool64_t mask, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrev8_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t src, size_t vl) {
+  return vrev8_v_i16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2(vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_ta(vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_tu(vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_tama(vbool32_t mask, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrev8_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t src, size_t vl) {
+  return vrev8_v_i16mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2(vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_ta(vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_tu(vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_tama(vbool64_t mask, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrev8_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t src, size_t vl) {
+  return vrev8_v_i32mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1(vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_ta(vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_tu(vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_tama(vbool8_t mask, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrev8_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t src, size_t vl) {
+  return vrev8_v_i8m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2(vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_ta(vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_tu(vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_tama(vbool4_t mask, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrev8_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t src, size_t vl) {
+  return vrev8_v_i8m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4(vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_ta(vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_tu(vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_tama(vbool2_t mask, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrev8_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t src, size_t vl) {
+  return vrev8_v_i8m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8(vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_ta(vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_tu(vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_tama(vbool1_t mask, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrev8_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t src, size_t vl) {
+  return vrev8_v_i8m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1(vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_ta(vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_tu(vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_tama(vbool16_t mask, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrev8_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t src, size_t vl) {
+  return vrev8_v_i16m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2(vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_ta(vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_tu(vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_tama(vbool8_t mask, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrev8_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t src, size_t vl) {
+  return vrev8_v_i16m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4(vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_ta(vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_tu(vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_tama(vbool4_t mask, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrev8_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t src, size_t vl) {
+  return vrev8_v_i16m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8(vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_ta(vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_tu(vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_tama(vbool2_t mask, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrev8_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t src, size_t vl) {
+  return vrev8_v_i16m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1(vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_ta(vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_tu(vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_tama(vbool32_t mask, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrev8_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t src, size_t vl) {
+  return vrev8_v_i32m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2(vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_ta(vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_tu(vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_tama(vbool16_t mask, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrev8_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t src, size_t vl) {
+  return vrev8_v_i32m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4(vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_ta(vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_tu(vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_tama(vbool8_t mask, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrev8_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t src, size_t vl) {
+  return vrev8_v_i32m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8(vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_ta(vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_tu(vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_tama(vbool4_t mask, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrev8_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t src, size_t vl) {
+  return vrev8_v_i32m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1(vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_ta(vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_tu(vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_tama(vbool64_t mask, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrev8_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t src, size_t vl) {
+  return vrev8_v_i64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2(vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_ta(vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_tu(vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_tama(vbool32_t mask, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrev8_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t src, size_t vl) {
+  return vrev8_v_i64m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4(vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_ta(vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_tu(vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_tama(vbool16_t mask, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrev8_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t src, size_t vl) {
+  return vrev8_v_i64m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8(vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_ta(vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_tu(vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_tama(vbool8_t mask, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrev8_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t src, size_t vl) {
+  return vrev8_v_i64m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8(vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_ta(vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_tama(vbool64_t mask, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrev8.mask.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrev8_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t src, size_t vl) {
+  return vrev8_v_u8mf8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4(vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_ta(vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_tama(vbool32_t mask, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrev8.mask.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrev8_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t src, size_t vl) {
+  return vrev8_v_u8mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2(vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_ta(vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_tama(vbool16_t mask, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrev8.mask.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrev8_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t src, size_t vl) {
+  return vrev8_v_u8mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4(vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_ta(vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_tama(vbool64_t mask, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrev8.mask.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrev8_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t src, size_t vl) {
+  return vrev8_v_u16mf4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2(vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_ta(vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_tama(vbool32_t mask, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrev8.mask.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrev8_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t src, size_t vl) {
+  return vrev8_v_u16mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2(vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_ta(vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_tama(vbool64_t mask, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrev8.mask.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrev8_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t src, size_t vl) {
+  return vrev8_v_u32mf2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1(vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_ta(vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_tu(vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_tama(vbool8_t mask, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrev8.mask.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrev8_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t src, size_t vl) {
+  return vrev8_v_u8m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2(vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_ta(vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_tu(vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_tama(vbool4_t mask, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrev8.mask.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrev8_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t src, size_t vl) {
+  return vrev8_v_u8m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4(vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_ta(vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_tu(vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_tama(vbool2_t mask, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrev8.mask.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrev8_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t src, size_t vl) {
+  return vrev8_v_u8m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8(vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_ta(vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_tu(vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_tama(vbool1_t mask, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrev8.mask.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[SRC:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrev8_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t src, size_t vl) {
+  return vrev8_v_u8m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1(vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_ta(vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_tu(vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_tama(vbool16_t mask, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrev8.mask.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrev8_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t src, size_t vl) {
+  return vrev8_v_u16m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2(vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_ta(vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_tu(vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_tama(vbool8_t mask, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrev8.mask.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrev8_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t src, size_t vl) {
+  return vrev8_v_u16m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4(vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_ta(vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_tu(vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_tama(vbool4_t mask, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrev8.mask.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrev8_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t src, size_t vl) {
+  return vrev8_v_u16m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8(vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_ta(vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_tu(vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_tama(vbool2_t mask, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrev8.mask.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[SRC:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrev8_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t src, size_t vl) {
+  return vrev8_v_u16m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1(vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_ta(vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_tu(vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_tama(vbool32_t mask, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrev8.mask.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrev8_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t src, size_t vl) {
+  return vrev8_v_u32m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2(vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_ta(vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_tu(vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_tama(vbool16_t mask, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrev8.mask.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrev8_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t src, size_t vl) {
+  return vrev8_v_u32m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4(vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_ta(vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_tu(vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_tama(vbool8_t mask, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrev8.mask.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrev8_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t src, size_t vl) {
+  return vrev8_v_u32m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8(vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_ta(vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_tu(vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_tama(vbool4_t mask, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrev8.mask.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[SRC:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrev8_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t src, size_t vl) {
+  return vrev8_v_u32m8_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1(vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_ta(vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_tu(vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_tama(vbool64_t mask, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrev8.mask.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[SRC:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrev8_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t src, size_t vl) {
+  return vrev8_v_u64m1_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2(vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_ta(vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_tu(vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_tama(vbool32_t mask, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrev8.mask.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[SRC:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrev8_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t src, size_t vl) {
+  return vrev8_v_u64m2_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4(vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_ta(vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_tu(vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_tama(vbool16_t mask, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrev8.mask.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[SRC:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrev8_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t src, size_t vl) {
+  return vrev8_v_u64m4_tumu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8(vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_m(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_ta(vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_ta(src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_tu(vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_tu(merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_tama(vbool8_t mask, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_tama(mask, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_tamu(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_tuma(mask, merge, src, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrev8_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrev8.mask.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[SRC:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrev8_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t src, size_t vl) {
+  return vrev8_v_u64m8_tumu(mask, merge, src, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vrol.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrol.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vrol.c
@@ -0,0 +1,6341 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
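+// NOTE: Informal summary of the suffix convention exercised below (inferred
+// from the emitted IR; not itself autogenerated):
+//   <no suffix>/_ta : unmasked, tail agnostic -- the passthru operand is undef.
+//   _tu            : unmasked, tail undisturbed -- the merge value is passed through.
+//   _m/_tumu/_tamu/_tuma/_tama : masked forms; the trailing i64 immediate on the
+//   masked intrinsic encodes the policy (bit 0 = tail agnostic, bit 1 = mask
+//   agnostic), so tumu = 0, tamu = 1, tuma = 2, tama = 3, and plain _m uses 0.
+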
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vrol_vv_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vrol_vv_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vrol_vv_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vrol_vv_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vrol_vv_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vrol_vv_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_ta(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_tu(vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_tama(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vv_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vrol_vv_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_ta(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_tu(vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_tama(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vv_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vrol_vv_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_ta(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_tu(vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_tama(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vv_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vrol_vv_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_ta(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_tu(vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_tama(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vv_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vrol_vv_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_ta(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_tu(vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_tama(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vv_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vrol_vv_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_ta(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_tu(vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_tama(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vv_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vrol_vv_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_ta(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_tu(vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_tama(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vv_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vrol_vv_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_ta(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_tu(vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_tama(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vv_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vrol_vv_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_ta(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_tu(vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_tama(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vv_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vrol_vv_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_ta(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_tu(vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_tama(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vv_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vrol_vv_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_ta(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_tu(vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_tama(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vv_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vrol_vv_i32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_ta(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_tu(vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_tama(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vv_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vrol_vv_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_ta(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vrol_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_ta(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_tu(vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_tama(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vv_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vrol_vv_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_ta(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_tu(vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_tama(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vv_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vrol_vv_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_ta(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_tu(vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_tama(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vv_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vrol_vv_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vrol_vv_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vrol_vv_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vrol_vv_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vrol_vv_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vrol_vv_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vrol_vv_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_tama(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vrol_vv_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_tama(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vrol_vv_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_tama(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vrol_vv_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_tama(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vrol_vv_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_tama(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vrol_vv_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_tama(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vrol_vv_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_tama(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vrol_vv_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_tama(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vrol_vv_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_tama(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vrol_vv_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_tama(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vrol_vv_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_tama(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vrol_vv_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_tama(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vrol_vv_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vrol_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_tama(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vrol_vv_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_tama(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vrol_vv_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_tama(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vv_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vrol_vv_u64m8_tumu(mask, merge, op1, op2, vl);
+}
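+
+// Note (summarizing the CHECK lines above and below): the vv tests take a vector
+// rotate amount, while the vx tests that follow take a scalar op2. In both forms
+// the _tu variants pass the merge operand to the unmasked intrinsic, the plain/_ta
+// variants pass undef, and the masked _tama/_tamu/_tuma/_tumu variants append the
+// policy immediate checked above (tumu=0, tamu=1, tuma=2, tama=3); _m uses the
+// merge operand with policy 0.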
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vrol_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_ta(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vrol_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_ta(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vrol_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_ta(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vrol_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_ta(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vrol_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vrol_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_ta(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_tu(vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_tama(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vrol_vx_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_ta(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_tu(vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_tama(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vrol_vx_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_ta(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_tu(vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_tama(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vrol_vx_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_ta(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_tu(vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_tama(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vrol_vx_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vrol_vx_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_ta(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_tu(vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_tama(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vrol_vx_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_ta(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_tu(vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_tama(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vrol_vx_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_ta(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_tu(vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_tama(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vrol_vx_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_ta(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_tu(vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_tama(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vrol_vx_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vrol_vx_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_ta(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_tu(vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_tama(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vrol_vx_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_ta(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_tu(vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_tama(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vrol_vx_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_ta(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_tu(vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_tama(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vrol_vx_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_ta(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_tu(vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_tama(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vrol_vx_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vrol_vx_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_ta(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vrol_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_ta(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_tu(vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_tama(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vrol_vx_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_ta(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_tu(vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_tama(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vrol_vx_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_ta(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_tu(vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_tama(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vrol_vx_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vrol_vx_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
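+// Note (editorial comment, not part of the generated checks): in the masked
+// variants above and below, the trailing i64 immediate in the CHECK lines is
+// the tail/mask policy operand. From the suffix-to-immediate pairing in this
+// file: _tumu -> 0, _tamu -> 1, _tuma -> 2, _tama -> 3 (ta = tail agnostic,
+// ma = mask agnostic). The unmasked _tu/_ta forms instead pass the merge
+// operand or undef as the passthru, respectively.
+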
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vrol.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vrol_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vrol.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vrol_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vrol.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vrol_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vrol.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vrol_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vrol.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vrol_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vrol.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vrol_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_ta(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_tama(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vrol.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vrol_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_ta(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_tama(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vrol.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vrol_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_ta(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_tama(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vrol.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vrol_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_ta(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_tama(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vrol.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vrol_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vrol_vx_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_ta(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_tama(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vrol.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vrol_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_ta(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_tama(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vrol.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vrol_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_ta(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_tama(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vrol.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vrol_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_ta(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_tama(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vrol.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vrol_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vrol_vx_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_ta(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_tama(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vrol.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vrol_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_ta(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_tama(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vrol.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vrol_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_ta(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_tama(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vrol.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vrol_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_ta(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_tama(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vrol.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vrol_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vrol_vx_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_ta(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vrol.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vrol_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_ta(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_tama(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vrol.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vrol_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_ta(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_tama(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vrol.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vrol_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_ta(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_tama(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vrol_vx_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vrol.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vrol_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vrol_vx_u64m8_tumu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vror.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vror.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vror.c
@@ -0,0 +1,6341 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_ta(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vv_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
+  return vror_vv_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_ta(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vv_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
+  return vror_vv_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_ta(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vv_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
+  return vror_vv_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_ta(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vv_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
+  return vror_vv_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_ta(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vv_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
+  return vror_vv_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_ta(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vv_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
+  return vror_vv_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_ta(vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_tu(vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_tama(vbool8_t mask, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vv_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, vint8m1_t op2, size_t vl) {
+  return vror_vv_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_ta(vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_tu(vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_tama(vbool4_t mask, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vv_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, vint8m2_t op2, size_t vl) {
+  return vror_vv_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_ta(vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_tu(vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_tama(vbool2_t mask, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vv_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, vint8m4_t op2, size_t vl) {
+  return vror_vv_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_ta(vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_tu(vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_tama(vbool1_t mask, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vv_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, vint8m8_t op2, size_t vl) {
+  return vror_vv_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_ta(vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_tu(vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_tama(vbool16_t mask, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vv_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, vint16m1_t op2, size_t vl) {
+  return vror_vv_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_ta(vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_tu(vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_tama(vbool8_t mask, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vv_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, vint16m2_t op2, size_t vl) {
+  return vror_vv_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_ta(vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_tu(vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_tama(vbool4_t mask, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vv_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, vint16m4_t op2, size_t vl) {
+  return vror_vv_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_ta(vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_tu(vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_tama(vbool2_t mask, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vv_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, vint16m8_t op2, size_t vl) {
+  return vror_vv_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_ta(vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_tu(vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_tama(vbool32_t mask, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vv_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, vint32m1_t op2, size_t vl) {
+  return vror_vv_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_ta(vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_tu(vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_tama(vbool16_t mask, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vv_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, vint32m2_t op2, size_t vl) {
+  return vror_vv_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_ta(vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_tu(vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_tama(vbool8_t mask, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vv_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, vint32m4_t op2, size_t vl) {
+  return vror_vv_i32m4_tumu(mask, merge, op1, op2, vl);
+}
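+
+// Note (explanatory, based on the checks above): the masked intrinsic calls
+// carry a trailing i64 policy operand whose value tracks the suffix of the
+// C-level intrinsic: tumu -> 0, tamu -> 1, tuma -> 2, tama -> 3 (tail
+// agnostic in bit 0, mask agnostic in bit 1). The legacy _m form lowers with
+// policy 0 and a merge passthru, while the unmasked ta/tu forms have no
+// policy operand and instead select the passthru operand (undef for tail
+// agnostic, the merge argument for tail undisturbed).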
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_ta(vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_tu(vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_tama(vbool4_t mask, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vv_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, vint32m8_t op2, size_t vl) {
+  return vror_vv_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_ta(vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_tu(vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_tama(vbool64_t mask, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vv_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, vint64m1_t op2, size_t vl) {
+  return vror_vv_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_ta(vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_tu(vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_tama(vbool32_t mask, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vv_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, vint64m2_t op2, size_t vl) {
+  return vror_vv_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_ta(vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_tu(vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_tama(vbool16_t mask, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vv_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, vint64m4_t op2, size_t vl) {
+  return vror_vv_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_ta(vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_tu(vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_tama(vbool8_t mask, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vv_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, vint64m8_t op2, size_t vl) {
+  return vror_vv_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_ta(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vv_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
+  return vror_vv_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_ta(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vv_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
+  return vror_vv_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_ta(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vv_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
+  return vror_vv_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_ta(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vv_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
+  return vror_vv_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_ta(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vv_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
+  return vror_vv_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_ta(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vv_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vror_vv_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_ta(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_tama(vbool8_t mask, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vv_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
+  return vror_vv_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_ta(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_tama(vbool4_t mask, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vv_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
+  return vror_vv_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_ta(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_tama(vbool2_t mask, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vv_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
+  return vror_vv_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_ta(vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_tama(vbool1_t mask, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.nxv64i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], <vscale x 64 x i8> [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vv_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, vuint8m8_t op2, size_t vl) {
+  return vror_vv_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_ta(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_tama(vbool16_t mask, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vv_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
+  return vror_vv_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_ta(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_tama(vbool8_t mask, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vv_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
+  return vror_vv_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_ta(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_tama(vbool4_t mask, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vv_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
+  return vror_vv_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_ta(vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_tama(vbool2_t mask, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.nxv32i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i16> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vv_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, vuint16m8_t op2, size_t vl) {
+  return vror_vv_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_ta(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_tama(vbool32_t mask, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vv_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vror_vv_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_ta(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_tama(vbool16_t mask, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vv_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vror_vv_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_ta(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_tama(vbool8_t mask, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vv_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vror_vv_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_ta(vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_tama(vbool4_t mask, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vv_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vror_vv_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_ta(vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_tama(vbool64_t mask, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vv_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vror_vv_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_ta(vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_tama(vbool32_t mask, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vv_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vror_vv_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_ta(vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_tama(vbool16_t mask, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vv_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vror_vv_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_ta(vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_tama(vbool8_t mask, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vv_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vv_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vror_vv_u64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_m(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_ta(vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_tu(vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_tama(vbool64_t mask, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_tamu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_tuma(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vint8mf8_t test_vror_vx_i8mf8_tumu(vbool64_t mask, vint8mf8_t merge, vint8mf8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_m(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_ta(vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_tu(vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_tama(vbool32_t mask, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_tamu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_tuma(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vint8mf4_t test_vror_vx_i8mf4_tumu(vbool32_t mask, vint8mf4_t merge, vint8mf4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_m(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_ta(vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_tu(vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_tama(vbool16_t mask, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_tamu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_tuma(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vint8mf2_t test_vror_vx_i8mf2_tumu(vbool16_t mask, vint8mf2_t merge, vint8mf2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_m(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_ta(vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_tu(vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_tama(vbool64_t mask, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_tamu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_tuma(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vint16mf4_t test_vror_vx_i16mf4_tumu(vbool64_t mask, vint16mf4_t merge, vint16mf4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_m(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_ta(vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_tu(vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_tama(vbool32_t mask, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_tamu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_tuma(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vint16mf2_t test_vror_vx_i16mf2_tumu(vbool32_t mask, vint16mf2_t merge, vint16mf2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_m(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_ta(vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_tu(vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_tama(vbool64_t mask, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_tamu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_tuma(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vint32mf2_t test_vror_vx_i32mf2_tumu(vbool64_t mask, vint32mf2_t merge, vint32mf2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_m(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_ta(vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_tu(vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_tama(vbool8_t mask, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_tamu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_tuma(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vint8m1_t test_vror_vx_i8m1_tumu(vbool8_t mask, vint8m1_t merge, vint8m1_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_m(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_ta(vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_tu(vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_tama(vbool4_t mask, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_tamu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_tuma(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vint8m2_t test_vror_vx_i8m2_tumu(vbool4_t mask, vint8m2_t merge, vint8m2_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_m(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_ta(vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_tu(vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_tama(vbool2_t mask, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_tamu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_tuma(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vint8m4_t test_vror_vx_i8m4_tumu(vbool2_t mask, vint8m4_t merge, vint8m4_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_m(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_ta(vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_tu(vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_tama(vbool1_t mask, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_tamu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_tuma(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vint8m8_t test_vror_vx_i8m8_tumu(vbool1_t mask, vint8m8_t merge, vint8m8_t op1, int8_t op2, size_t vl) {
+  return vror_vx_i8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_m(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_ta(vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_tu(vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_tama(vbool16_t mask, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_tamu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_tuma(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vint16m1_t test_vror_vx_i16m1_tumu(vbool16_t mask, vint16m1_t merge, vint16m1_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_m(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_ta(vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_tu(vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_tama(vbool8_t mask, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_tamu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_tuma(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vint16m2_t test_vror_vx_i16m2_tumu(vbool8_t mask, vint16m2_t merge, vint16m2_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_m(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_ta(vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_tu(vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_tama(vbool4_t mask, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_tamu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_tuma(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vint16m4_t test_vror_vx_i16m4_tumu(vbool4_t mask, vint16m4_t merge, vint16m4_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_m(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_ta(vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_tu(vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_tama(vbool2_t mask, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_tamu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_tuma(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vint16m8_t test_vror_vx_i16m8_tumu(vbool2_t mask, vint16m8_t merge, vint16m8_t op1, int16_t op2, size_t vl) {
+  return vror_vx_i16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_m(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_ta(vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_tu(vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_tama(vbool32_t mask, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_tamu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_tuma(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vint32m1_t test_vror_vx_i32m1_tumu(vbool32_t mask, vint32m1_t merge, vint32m1_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_m(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_ta(vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_tu(vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_tama(vbool16_t mask, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_tamu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_tuma(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vint32m2_t test_vror_vx_i32m2_tumu(vbool16_t mask, vint32m2_t merge, vint32m2_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_m(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_ta(vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_tu(vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_tama(vbool8_t mask, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_tamu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_tuma(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vint32m4_t test_vror_vx_i32m4_tumu(vbool8_t mask, vint32m4_t merge, vint32m4_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_m(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_ta(vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_tu(vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_tama(vbool4_t mask, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_tamu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_tuma(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vint32m8_t test_vror_vx_i32m8_tumu(vbool4_t mask, vint32m8_t merge, vint32m8_t op1, int32_t op2, size_t vl) {
+  return vror_vx_i32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_m(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_ta(vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_tu(vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_tama(vbool64_t mask, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_tamu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_tuma(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vint64m1_t test_vror_vx_i64m1_tumu(vbool64_t mask, vint64m1_t merge, vint64m1_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_m(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_ta(vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_tu(vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_tama(vbool32_t mask, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_tamu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_tuma(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vint64m2_t test_vror_vx_i64m2_tumu(vbool32_t mask, vint64m2_t merge, vint64m2_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_m(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_ta(vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_tu(vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_tama(vbool16_t mask, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_tamu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_tuma(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vint64m4_t test_vror_vx_i64m4_tumu(vbool16_t mask, vint64m4_t merge, vint64m4_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_m(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_ta(vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_tu(vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_tama(vbool8_t mask, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_tamu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_tuma(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_i64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vint64m8_t test_vror_vx_i64m8_tumu(vbool8_t mask, vint64m8_t merge, vint64m8_t op1, int64_t op2, size_t vl) {
+  return vror_vx_i64m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_ta(vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_tu(vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> undef, <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_tama(vbool64_t mask, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_tamu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_tuma(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vror.mask.nxv1i8.i8.i64(<vscale x 1 x i8> [[MERGE:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
+//
+vuint8mf8_t test_vror_vx_u8mf8_tumu(vbool64_t mask, vuint8mf8_t merge, vuint8mf8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_ta(vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_tu(vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> undef, <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_tama(vbool32_t mask, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_tamu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_tuma(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vror.mask.nxv2i8.i8.i64(<vscale x 2 x i8> [[MERGE:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
+//
+vuint8mf4_t test_vror_vx_u8mf4_tumu(vbool32_t mask, vuint8mf4_t merge, vuint8mf4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_ta(vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_tu(vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> undef, <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_tama(vbool16_t mask, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_tamu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_tuma(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vror.mask.nxv4i8.i8.i64(<vscale x 4 x i8> [[MERGE:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
+//
+vuint8mf2_t test_vror_vx_u8mf2_tumu(vbool16_t mask, vuint8mf2_t merge, vuint8mf2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_ta(vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_tu(vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> undef, <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_tama(vbool64_t mask, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_tamu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_tuma(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vror.mask.nxv1i16.i16.i64(<vscale x 1 x i16> [[MERGE:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
+//
+vuint16mf4_t test_vror_vx_u16mf4_tumu(vbool64_t mask, vuint16mf4_t merge, vuint16mf4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_ta(vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_tu(vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> undef, <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_tama(vbool32_t mask, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_tamu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_tuma(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vror.mask.nxv2i16.i16.i64(<vscale x 2 x i16> [[MERGE:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
+//
+vuint16mf2_t test_vror_vx_u16mf2_tumu(vbool32_t mask, vuint16mf2_t merge, vuint16mf2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_ta(vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> undef, <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_tama(vbool64_t mask, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_tamu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_tuma(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32mf2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vror.mask.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vror_vx_u32mf2_tumu(vbool64_t mask, vuint32mf2_t merge, vuint32mf2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32mf2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_m(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_ta(vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_tu(vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> undef, <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_tama(vbool8_t mask, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_tamu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_tuma(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vror.mask.nxv8i8.i8.i64(<vscale x 8 x i8> [[MERGE:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
+//
+vuint8m1_t test_vror_vx_u8m1_tumu(vbool8_t mask, vuint8m1_t merge, vuint8m1_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_m(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_ta(vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_tu(vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> undef, <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_tama(vbool4_t mask, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_tamu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_tuma(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vror.mask.nxv16i8.i8.i64(<vscale x 16 x i8> [[MERGE:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
+//
+vuint8m2_t test_vror_vx_u8m2_tumu(vbool4_t mask, vuint8m2_t merge, vuint8m2_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_m(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_ta(vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_tu(vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> undef, <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_tama(vbool2_t mask, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_tamu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_tuma(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vror.mask.nxv32i8.i8.i64(<vscale x 32 x i8> [[MERGE:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
+//
+vuint8m4_t test_vror_vx_u8m4_tumu(vbool2_t mask, vuint8m4_t merge, vuint8m4_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_m(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_ta(vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_tu(vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> undef, <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_tama(vbool1_t mask, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_tamu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_tuma(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u8m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vror.mask.nxv64i8.i8.i64(<vscale x 64 x i8> [[MERGE:%.*]], <vscale x 64 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
+//
+vuint8m8_t test_vror_vx_u8m8_tumu(vbool1_t mask, vuint8m8_t merge, vuint8m8_t op1, uint8_t op2, size_t vl) {
+  return vror_vx_u8m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_m(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_ta(vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_tu(vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> undef, <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_tama(vbool16_t mask, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_tamu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_tuma(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vror.mask.nxv4i16.i16.i64(<vscale x 4 x i16> [[MERGE:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
+//
+vuint16m1_t test_vror_vx_u16m1_tumu(vbool16_t mask, vuint16m1_t merge, vuint16m1_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_m(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_ta(vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_tu(vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> undef, <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_tama(vbool8_t mask, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_tamu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_tuma(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vror.mask.nxv8i16.i16.i64(<vscale x 8 x i16> [[MERGE:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
+//
+vuint16m2_t test_vror_vx_u16m2_tumu(vbool8_t mask, vuint16m2_t merge, vuint16m2_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_m(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_ta(vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_tu(vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> undef, <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_tama(vbool4_t mask, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_tamu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_tuma(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vror.mask.nxv16i16.i16.i64(<vscale x 16 x i16> [[MERGE:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
+//
+vuint16m4_t test_vror_vx_u16m4_tumu(vbool4_t mask, vuint16m4_t merge, vuint16m4_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_m(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_ta(vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_tu(vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> undef, <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_tama(vbool2_t mask, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_tamu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_tuma(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u16m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vror.mask.nxv32i16.i16.i64(<vscale x 32 x i16> [[MERGE:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
+//
+vuint16m8_t test_vror_vx_u16m8_tumu(vbool2_t mask, vuint16m8_t merge, vuint16m8_t op1, uint16_t op2, size_t vl) {
+  return vror_vx_u16m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_m(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_ta(vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> undef, <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_tama(vbool32_t mask, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_tamu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_tuma(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vror.mask.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vror_vx_u32m1_tumu(vbool32_t mask, vuint32m1_t merge, vuint32m1_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_m(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_ta(vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> undef, <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_tama(vbool16_t mask, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_tamu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_tuma(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vror.mask.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vror_vx_u32m2_tumu(vbool16_t mask, vuint32m2_t merge, vuint32m2_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_m(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_ta(vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> undef, <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_tama(vbool8_t mask, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_tamu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_tuma(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vror.mask.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vror_vx_u32m4_tumu(vbool8_t mask, vuint32m4_t merge, vuint32m4_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_m(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_ta(vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> undef, <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_tama(vbool4_t mask, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_tamu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_tuma(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u32m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vror.mask.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vror_vx_u32m8_tumu(vbool4_t mask, vuint32m8_t merge, vuint32m8_t op1, uint32_t op2, size_t vl) {
+  return vror_vx_u32m8_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_m(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_ta(vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> undef, <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_tama(vbool64_t mask, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_tamu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_tuma(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m1_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vror.mask.nxv1i64.i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vror_vx_u64m1_tumu(vbool64_t mask, vuint64m1_t merge, vuint64m1_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m1_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_m(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_ta(vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> undef, <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_tama(vbool32_t mask, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_tamu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_tuma(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m2_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vror.mask.nxv2i64.i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vror_vx_u64m2_tumu(vbool32_t mask, vuint64m2_t merge, vuint64m2_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m2_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_m(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_ta(vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> undef, <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_tama(vbool16_t mask, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_tamu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_tuma(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m4_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vror.mask.nxv4i64.i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vror_vx_u64m4_tumu(vbool16_t mask, vuint64m4_t merge, vuint64m4_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m4_tumu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_m(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_m(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_m(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_ta(vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_ta(op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], i64 [[VL:%.*]])
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_tama(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> undef, <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 3)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_tama(vbool8_t mask, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_tama(mask, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_tamu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_tamu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_tamu(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_tuma(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 2)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_tuma(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_tuma(mask, merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vror_vx_u64m8_tumu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vror.mask.nxv8i64.i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i64 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vror_vx_u64m8_tumu(vbool8_t mask, vuint64m8_t merge, vuint64m8_t op1, uint64_t op2, size_t vl) {
+  return vror_vx_u64m8_tumu(mask, merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ch.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ch.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ch.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ch_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ch_vv_u32mf2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ch_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ch_vv_u32mf2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ch.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ch_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ch_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ch_vv_u32m1(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ch_vv_u32m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ch_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ch_vv_u32m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ch.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ch_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ch_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ch_vv_u32m2(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ch_vv_u32m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ch_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ch_vv_u32m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ch.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ch_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ch_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ch_vv_u32m4(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ch_vv_u32m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ch_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ch_vv_u32m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ch.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ch_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ch_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ch_vv_u32m8(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ch_vv_u32m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ch_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ch_vv_u32m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ch.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ch_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ch_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ch_vv_u64m1(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ch_vv_u64m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ch_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ch_vv_u64m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ch.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ch_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ch_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ch_vv_u64m2(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ch_vv_u64m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ch_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ch_vv_u64m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ch.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ch_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ch_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ch_vv_u64m4(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ch_vv_u64m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ch_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ch_vv_u64m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ch.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ch_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ch_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ch_vv_u64m8(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ch_vv_u64m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ch_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ch_vv_u64m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ch_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ch.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ch_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ch_vv_u64m8_tu(merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2cl.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2cl.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2cl.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2cl_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2cl_vv_u32mf2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2cl_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2cl_vv_u32mf2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2cl.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2cl_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2cl_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2cl_vv_u32m1(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2cl_vv_u32m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2cl_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2cl_vv_u32m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2cl.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2cl_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2cl_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2cl_vv_u32m2(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2cl_vv_u32m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2cl_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2cl_vv_u32m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2cl.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2cl_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2cl_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2cl_vv_u32m4(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2cl_vv_u32m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2cl_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2cl_vv_u32m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2cl.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2cl_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2cl_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2cl_vv_u32m8(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2cl_vv_u32m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2cl_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2cl_vv_u32m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2cl.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2cl_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2cl_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2cl_vv_u64m1(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2cl_vv_u64m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2cl_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2cl_vv_u64m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2cl.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2cl_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2cl_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2cl_vv_u64m2(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2cl_vv_u64m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2cl_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2cl_vv_u64m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2cl.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2cl_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2cl_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2cl_vv_u64m4(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2cl_vv_u64m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2cl_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2cl_vv_u64m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2cl.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2cl_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2cl_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2cl_vv_u64m8(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2cl_vv_u64m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2cl_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2cl_vv_u64m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2cl_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2cl.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2cl_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2cl_vv_u64m8_tu(merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ms.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ms.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsha2ms.c
@@ -0,0 +1,248 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ms_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ms_vv_u32mf2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ms_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ms_vv_u32mf2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsha2ms.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsha2ms_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsha2ms_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ms_vv_u32m1(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ms_vv_u32m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ms_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ms_vv_u32m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsha2ms.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsha2ms_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsha2ms_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ms_vv_u32m2(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ms_vv_u32m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ms_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ms_vv_u32m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsha2ms.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsha2ms_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsha2ms_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ms_vv_u32m4(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ms_vv_u32m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ms_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ms_vv_u32m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsha2ms.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsha2ms_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsha2ms_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ms_vv_u32m8(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ms_vv_u32m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ms_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ms_vv_u32m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsha2ms.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsha2ms_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsha2ms_vv_u32m8_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ms_vv_u64m1(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ms_vv_u64m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[VD:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ms_vv_u64m1_ta(vuint64m1_t vd, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ms_vv_u64m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vsha2ms.nxv1i64.nxv1i64.i64(<vscale x 1 x i64> [[MERGE:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
+//
+vuint64m1_t test_vsha2ms_vv_u64m1_tu(vuint64m1_t merge, vuint64m1_t op1, vuint64m1_t op2, size_t vl) {
+  return vsha2ms_vv_u64m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ms_vv_u64m2(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ms_vv_u64m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[VD:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ms_vv_u64m2_ta(vuint64m2_t vd, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ms_vv_u64m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vsha2ms.nxv2i64.nxv2i64.i64(<vscale x 2 x i64> [[MERGE:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
+//
+vuint64m2_t test_vsha2ms_vv_u64m2_tu(vuint64m2_t merge, vuint64m2_t op1, vuint64m2_t op2, size_t vl) {
+  return vsha2ms_vv_u64m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ms_vv_u64m4(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ms_vv_u64m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[VD:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ms_vv_u64m4_ta(vuint64m4_t vd, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ms_vv_u64m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vsha2ms.nxv4i64.nxv4i64.i64(<vscale x 4 x i64> [[MERGE:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
+//
+vuint64m4_t test_vsha2ms_vv_u64m4_tu(vuint64m4_t merge, vuint64m4_t op1, vuint64m4_t op2, size_t vl) {
+  return vsha2ms_vv_u64m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ms_vv_u64m8(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ms_vv_u64m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[VD:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ms_vv_u64m8_ta(vuint64m8_t vd, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ms_vv_u64m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsha2ms_vv_u64m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vsha2ms.nxv8i64.nxv8i64.i64(<vscale x 8 x i64> [[MERGE:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i64> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
+//
+vuint64m8_t test_vsha2ms_vv_u64m8_tu(vuint64m8_t merge, vuint64m8_t op1, vuint64m8_t op2, size_t vl) {
+  return vsha2ms_vv_u64m8_tu(merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3c.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3c.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3c.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3c_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vsm3c_vi_u32mf2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3c_vi_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vsm3c_vi_u32mf2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3c.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3c_vi_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vsm3c_vi_u32mf2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3c_vi_u32m1(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vsm3c_vi_u32m1(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3c_vi_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vsm3c_vi_u32m1_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3c.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3c_vi_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, size_t vl) {
+  return vsm3c_vi_u32m1_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3c_vi_u32m2(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vsm3c_vi_u32m2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3c_vi_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vsm3c_vi_u32m2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3c.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3c_vi_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, size_t vl) {
+  return vsm3c_vi_u32m2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3c_vi_u32m4(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vsm3c_vi_u32m4(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3c_vi_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vsm3c_vi_u32m4_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3c.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3c_vi_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, size_t vl) {
+  return vsm3c_vi_u32m4_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3c_vi_u32m8(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vsm3c_vi_u32m8(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3c_vi_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vsm3c_vi_u32m8_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3c_vi_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3c.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3c_vi_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, size_t vl) {
+  return vsm3c_vi_u32m8_tu(merge, op1, 8, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3me.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3me.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm3me.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3me_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsm3me_vv_u32mf2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3me_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsm3me_vv_u32mf2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm3me.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm3me_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
+  return vsm3me_vv_u32mf2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3me_vv_u32m1(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsm3me_vv_u32m1(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3me_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsm3me_vv_u32m1_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm3me.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm3me_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
+  return vsm3me_vv_u32m1_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3me_vv_u32m2(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsm3me_vv_u32m2(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3me_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsm3me_vv_u32m2_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm3me.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm3me_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
+  return vsm3me_vv_u32m2_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3me_vv_u32m4(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsm3me_vv_u32m4(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3me_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsm3me_vv_u32m4_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm3me.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm3me_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
+  return vsm3me_vv_u32m4_tu(merge, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3me_vv_u32m8(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsm3me_vv_u32m8(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3me_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsm3me_vv_u32m8_ta(vd, op1, op2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm3me_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm3me.nxv16i32.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i32> [[OP2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm3me_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, vuint32m8_t op2, size_t vl) {
+  return vsm3me_vv_u32m8_tu(merge, op1, op2, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4k.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4k.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4k.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4k_vi_u32mf2(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vsm4k_vi_u32mf2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4k_vi_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t op1, size_t vl) {
+  return vsm4k_vi_u32mf2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4k.nxv1i32.i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4k_vi_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t op1, size_t vl) {
+  return vsm4k_vi_u32mf2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4k_vi_u32m1(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vsm4k_vi_u32m1(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4k_vi_u32m1_ta(vuint32m1_t vd, vuint32m1_t op1, size_t vl) {
+  return vsm4k_vi_u32m1_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4k.nxv2i32.i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4k_vi_u32m1_tu(vuint32m1_t merge, vuint32m1_t op1, size_t vl) {
+  return vsm4k_vi_u32m1_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4k_vi_u32m2(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vsm4k_vi_u32m2(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4k_vi_u32m2_ta(vuint32m2_t vd, vuint32m2_t op1, size_t vl) {
+  return vsm4k_vi_u32m2_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4k.nxv4i32.i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4k_vi_u32m2_tu(vuint32m2_t merge, vuint32m2_t op1, size_t vl) {
+  return vsm4k_vi_u32m2_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4k_vi_u32m4(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vsm4k_vi_u32m4(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4k_vi_u32m4_ta(vuint32m4_t vd, vuint32m4_t op1, size_t vl) {
+  return vsm4k_vi_u32m4_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4k.nxv8i32.i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4k_vi_u32m4_tu(vuint32m4_t merge, vuint32m4_t op1, size_t vl) {
+  return vsm4k_vi_u32m4_tu(merge, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4k_vi_u32m8(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vsm4k_vi_u32m8(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4k_vi_u32m8_ta(vuint32m8_t vd, vuint32m8_t op1, size_t vl) {
+  return vsm4k_vi_u32m8_ta(vd, op1, 8, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4k_vi_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4k.nxv16i32.i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i32 8, i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4k_vi_u32m8_tu(vuint32m8_t merge, vuint32m8_t op1, size_t vl) {
+  return vsm4k_vi_u32m8_tu(merge, op1, 8, vl);
+}
diff --git a/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4r.c b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4r.c
new file mode 100644
--- /dev/null
+++ b/clang/test/CodeGen/RISCV/rvv-intrinsics/vsm4r.c
@@ -0,0 +1,140 @@
+// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
+// REQUIRES: riscv-registered-target
+// RUN: %clang_cc1 -triple riscv64 -target-feature +v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
+
+#include <riscv_vector.h>
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32mf2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4r_vv_u32mf2(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vsm4r_vv_u32mf2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32mf2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32.i64(<vscale x 1 x i32> [[VD:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4r_vv_u32mf2_ta(vuint32mf2_t vd, vuint32mf2_t vs2, size_t vl) {
+  return vsm4r_vv_u32mf2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32mf2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vsm4r.vv.nxv1i32.i64(<vscale x 1 x i32> [[MERGE:%.*]], <vscale x 1 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
+//
+vuint32mf2_t test_vsm4r_vv_u32mf2_tu(vuint32mf2_t merge, vuint32mf2_t vs2, size_t vl) {
+  return vsm4r_vv_u32mf2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m1(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4r_vv_u32m1(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vsm4r_vv_u32m1(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m1_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32.i64(<vscale x 2 x i32> [[VD:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4r_vv_u32m1_ta(vuint32m1_t vd, vuint32m1_t vs2, size_t vl) {
+  return vsm4r_vv_u32m1_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m1_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vsm4r.vv.nxv2i32.i64(<vscale x 2 x i32> [[MERGE:%.*]], <vscale x 2 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
+//
+vuint32m1_t test_vsm4r_vv_u32m1_tu(vuint32m1_t merge, vuint32m1_t vs2, size_t vl) {
+  return vsm4r_vv_u32m1_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m2(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4r_vv_u32m2(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vsm4r_vv_u32m2(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m2_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32.i64(<vscale x 4 x i32> [[VD:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4r_vv_u32m2_ta(vuint32m2_t vd, vuint32m2_t vs2, size_t vl) {
+  return vsm4r_vv_u32m2_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m2_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vsm4r.vv.nxv4i32.i64(<vscale x 4 x i32> [[MERGE:%.*]], <vscale x 4 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
+//
+vuint32m2_t test_vsm4r_vv_u32m2_tu(vuint32m2_t merge, vuint32m2_t vs2, size_t vl) {
+  return vsm4r_vv_u32m2_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m4(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4r_vv_u32m4(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vsm4r_vv_u32m4(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m4_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32.i64(<vscale x 8 x i32> [[VD:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4r_vv_u32m4_ta(vuint32m4_t vd, vuint32m4_t vs2, size_t vl) {
+  return vsm4r_vv_u32m4_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m4_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vsm4r.vv.nxv8i32.i64(<vscale x 8 x i32> [[MERGE:%.*]], <vscale x 8 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
+//
+vuint32m4_t test_vsm4r_vv_u32m4_tu(vuint32m4_t merge, vuint32m4_t vs2, size_t vl) {
+  return vsm4r_vv_u32m4_tu(merge, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m8(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4r_vv_u32m8(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vsm4r_vv_u32m8(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m8_ta(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32.i64(<vscale x 16 x i32> [[VD:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 1)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4r_vv_u32m8_ta(vuint32m8_t vd, vuint32m8_t vs2, size_t vl) {
+  return vsm4r_vv_u32m8_ta(vd, vs2, vl);
+}
+
+// CHECK-RV64-LABEL: @test_vsm4r_vv_u32m8_tu(
+// CHECK-RV64-NEXT:  entry:
+// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vsm4r.vv.nxv16i32.i64(<vscale x 16 x i32> [[MERGE:%.*]], <vscale x 16 x i32> [[VS2:%.*]], i64 [[VL:%.*]], i64 0)
+// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
+//
+vuint32m8_t test_vsm4r_vv_u32m8_tu(vuint32m8_t merge, vuint32m8_t vs2, size_t vl) {
+  return vsm4r_vv_u32m8_tu(merge, vs2, vl);
+}