diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td
--- a/clang/include/clang/Basic/riscv_vector.td
+++ b/clang/include/clang/Basic/riscv_vector.td
@@ -205,7 +205,7 @@
   string HeaderCode = "";
 
-  // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg.
+  // Sub extensions of vector spec. Currently supports Zvamo, Zvlsseg and RV64.
-  string RequiredExtension = "";
+  list<string> RequiredExtensions = [];
 
   // Number of fields for Zvlsseg.
   int NF = 1;
@@ -673,7 +673,7 @@
         Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo());
       }] in {
       foreach type = TypeList in {
-        foreach eew_list = EEWList in {
+        foreach eew_list = EEWList[0-2] in {
           defvar eew = eew_list[0];
           defvar eew_type = eew_list[1];
           let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in {
@@ -683,6 +683,15 @@
               }
           }
         }
+        defvar eew64 = "64";
+        defvar eew64_type = "(Log2EEW:6)";
+        let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+            RequiredExtensions = ["RV64"] in {
+          def : RVVBuiltin<"v", "vPCe" # eew64_type # "Uv", type>;
+          if !not(IsFloat<type>.val) then {
+            def : RVVBuiltin<"Uv", "UvPCUe" # eew64_type # "Uv", type>;
+          }
+        }
       }
   }
 }
@@ -760,7 +769,7 @@
         IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()};
       }] in {
       foreach type = TypeList in {
-        foreach eew_list = EEWList in {
+        foreach eew_list = EEWList[0-2] in {
           defvar eew = eew_list[0];
           defvar eew_type = eew_list[1];
           let Name = op # eew  # "_v", IRName = op, IRNameMask = op # "_mask" in  {
@@ -770,6 +779,15 @@
             }
           }
         }
+        defvar eew64 = "64";
+        defvar eew64_type = "(Log2EEW:6)";
+        let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask",
+            RequiredExtensions = ["RV64"] in {
+          def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>;
+          if !not(IsFloat<type>.val) then {
+            def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>;
+          }
+        }
       }
   }
 }
@@ -1247,13 +1265,13 @@
 defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>;
 
 // 7.8 Vector Load/Store Segment Instructions
-let RequiredExtension = "Zvlsseg" in {
+let RequiredExtensions = ["Zvlsseg"] in {
 defm : RVVUnitStridedSegLoad<"vlseg">;
 defm : RVVUnitStridedSegLoadFF<"vlseg">;
 }
 
 // 8. Vector AMO Operations
-let RequiredExtension = "Zvamo" in {
+let RequiredExtensions = ["Zvamo"] in {
 defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>;
 defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
 defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>;
diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp
--- a/clang/utils/TableGen/RISCVVEmitter.cpp
+++ b/clang/utils/TableGen/RISCVVEmitter.cpp
@@ -142,6 +142,7 @@
   Zfh = 1 << 3,
   Zvamo = 1 << 4,
   Zvlsseg = 1 << 5,
+  RV64 = 1 << 6,
 };
 
 // TODO refactor RVVIntrinsic class design after support all intrinsic
@@ -175,7 +176,7 @@
                bool HasNoMaskedOverloaded, bool HasAutoDef,
                StringRef ManualCodegen, const RVVTypes &Types,
                const std::vector<int64_t> &IntrinsicTypes,
-               StringRef RequiredExtension, unsigned NF);
+               const std::vector<StringRef> &RequiredExtensions, unsigned NF);
   ~RVVIntrinsic() = default;
 
   StringRef getName() const { return Name; }
@@ -760,7 +761,8 @@
                            bool HasNoMaskedOverloaded, bool HasAutoDef,
                            StringRef ManualCodegen, const RVVTypes &OutInTypes,
                            const std::vector<int64_t> &NewIntrinsicTypes,
-                           StringRef RequiredExtension, unsigned NF)
+                           const std::vector<StringRef> &RequiredExtensions,
+                           unsigned NF)
     : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask),
       HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL),
       HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef),
@@ -788,10 +790,14 @@
     else if (T->isFloatVector(64) || T->isFloat(64))
       RISCVExtensions |= RISCVExtension::D;
   }
-  if (RequiredExtension == "Zvamo")
-    RISCVExtensions |= RISCVExtension::Zvamo;
-  if (RequiredExtension == "Zvlsseg")
-    RISCVExtensions |= RISCVExtension::Zvlsseg;
+  for (StringRef Extension : RequiredExtensions) {
+    if (Extension == "Zvamo")
+      RISCVExtensions |= RISCVExtension::Zvamo;
+    if (Extension == "Zvlsseg")
+      RISCVExtensions |= RISCVExtension::Zvlsseg;
+    if (Extension == "RV64")
+      RISCVExtensions |= RISCVExtension::RV64;
+  }
 
   // Init OutputType and InputTypes
   OutputType = OutInTypes[0];
@@ -1091,7 +1097,8 @@
     StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask");
     std::vector<int64_t> IntrinsicTypes =
         R->getValueAsListOfInts("IntrinsicTypes");
-    StringRef RequiredExtension = R->getValueAsString("RequiredExtension");
+    std::vector<StringRef> RequiredExtensions =
+        R->getValueAsListOfStrings("RequiredExtensions");
     StringRef IRName = R->getValueAsString("IRName");
     StringRef IRNameMask = R->getValueAsString("IRNameMask");
     unsigned NF = R->getValueAsInt("NF");
@@ -1159,7 +1166,7 @@
             Name, SuffixStr, MangledName, MangledSuffixStr, IRName,
             HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false,
             HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen,
-            Types.getValue(), IntrinsicTypes, RequiredExtension, NF));
+            Types.getValue(), IntrinsicTypes, RequiredExtensions, NF));
         if (HasMask) {
           // Create a mask intrinsic
           Optional<RVVTypes> MaskTypes =
@@ -1168,7 +1175,7 @@
               Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask,
               HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL,
               HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask,
-              MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF));
+              MaskTypes.getValue(), IntrinsicTypes, RequiredExtensions, NF));
         }
       } // end for Log2LMULList
     }   // end for TypeRange
@@ -1249,6 +1256,8 @@
     OS << LS << "defined(__riscv_zvamo)";
   if (Extents & RISCVExtension::Zvlsseg)
     OS << LS << "defined(__riscv_zvlsseg)";
+  if (Extents & RISCVExtension::RV64)
+    OS << LS << "(__riscv_xlen == 64)";
   OS << "\n";
   return true;
 }
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp
@@ -345,6 +345,10 @@
 
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
@@ -422,6 +426,10 @@
 
   RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
   unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+  if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+    report_fatal_error("The V extension does not support EEW=64 for index "
+                       "values when XLEN=32");
+  }
   const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo(
       NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
       static_cast<unsigned>(IndexLMUL));
@@ -888,6 +896,10 @@
       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+        report_fatal_error("The V extension does not support EEW=64 for index "
+                           "values when XLEN=32");
+      }
       const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo(
           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
           static_cast<unsigned>(IndexLMUL));
@@ -1077,6 +1089,10 @@
       RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT);
       RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT);
       unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits());
+      if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) {
+        report_fatal_error("The V extension does not support EEW=64 for index "
+                           "values when XLEN=32");
+      }
       const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo(
           IsMasked, IsOrdered, IndexLog2EEW, static_cast<unsigned>(LMUL),
           static_cast<unsigned>(IndexLMUL));
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -4540,6 +4540,12 @@
   } else
     VL = DAG.getRegister(RISCV::X0, XLenVT);
 
+  // Truncate the i64 index vector to i32, as EEW=64 indices require RV64.
+  if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) {
+    IndexVT = MVT::getScalableVectorVT(XLenVT, IndexVT.getVectorNumElements());
+    Index = DAG.getZExtOrTrunc(Index, DL, IndexVT);
+  }
+
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vloxei : Intrinsic::riscv_vloxei_mask;
   SmallVector<SDValue, 8> Ops{MGN->getChain(),
@@ -4621,6 +4627,12 @@
   } else
     VL = DAG.getRegister(RISCV::X0, XLenVT);
 
+  // Truncate the i64 index vector to i32, as EEW=64 indices require RV64.
+  if (IndexVT.getVectorElementType() == MVT::i64 && XLenVT == MVT::i32) {
+    IndexVT = MVT::getScalableVectorVT(XLenVT, IndexVT.getVectorNumElements());
+    Index = DAG.getZExtOrTrunc(Index, DL, IndexVT);
+  }
+
   unsigned IntID =
       IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
   SmallVector<SDValue, 8> Ops{MSN->getChain(),
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td
@@ -492,23 +492,29 @@
 def VLUXEI8_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth8, "vluxei8.v">;
 def VLUXEI16_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth16, "vluxei16.v">;
 def VLUXEI32_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth32, "vluxei32.v">;
-def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;
 
 def VLOXEI8_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth8, "vloxei8.v">;
 def VLOXEI16_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth16, "vloxei16.v">;
 def VLOXEI32_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth32, "vloxei32.v">;
-def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;
 
 def VSUXEI8_V : VIndexedStore<MOPSTIndexedUnord, LSWidth8, "vsuxei8.v">;
 def VSUXEI16_V : VIndexedStore<MOPSTIndexedUnord, LSWidth16, "vsuxei16.v">;
 def VSUXEI32_V : VIndexedStore<MOPSTIndexedUnord, LSWidth32, "vsuxei32.v">;
-def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;
 
 def VSOXEI8_V : VIndexedStore<MOPSTIndexedOrder, LSWidth8, "vsoxei8.v">;
 def VSOXEI16_V : VIndexedStore<MOPSTIndexedOrder, LSWidth16, "vsoxei16.v">;
 def VSOXEI32_V : VIndexedStore<MOPSTIndexedOrder, LSWidth32, "vsoxei32.v">;
+} // Predicates = [HasStdExtV]
+
+let Predicates = [HasStdExtV, IsRV64] in {
+// Vector Indexed Instructions with EEW=64 for index values
+def VLUXEI64_V : VIndexedLoad<MOPLDIndexedUnord, LSWidth64, "vluxei64.v">;
+def VLOXEI64_V : VIndexedLoad<MOPLDIndexedOrder, LSWidth64, "vloxei64.v">;
+def VSUXEI64_V : VIndexedStore<MOPSTIndexedUnord, LSWidth64, "vsuxei64.v">;
 def VSOXEI64_V : VIndexedStore<MOPSTIndexedOrder, LSWidth64, "vsoxei64.v">;
+} // Predicates = [HasStdExtV, IsRV64]
 
+let Predicates = [HasStdExtV] in {
 defm VL1R : VWholeLoad<0, "vl1r", VR>;
 defm VL2R : VWholeLoad<1, "vl2r", VRM2>;
 defm VL4R : VWholeLoad<3, "vl4r", VRM4>;
@@ -1053,8 +1059,8 @@
 } // hasSideEffects = 0, mayLoad = 0, mayStore = 0
 } // Predicates = [HasStdExtV]
 
-let Predicates = [HasStdExtZvlsseg] in {
-  foreach nf=2-8 in {
+foreach nf=2-8 in {
+  let Predicates = [HasStdExtZvlsseg] in {
     def VLSEG#nf#E8_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth8, "vlseg"#nf#"e8.v">;
     def VLSEG#nf#E16_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth16, "vlseg"#nf#"e16.v">;
     def VLSEG#nf#E32_V : VUnitStrideSegmentLoad<!add(nf, -1), LUMOPUnitStride, LSWidth32, "vlseg"#nf#"e32.v">;
@@ -1088,8 +1094,6 @@
                               LSWidth16, "vluxseg"#nf#"ei16.v">;
     def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
                               LSWidth32, "vluxseg"#nf#"ei32.v">;
-    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
-                              LSWidth64, "vluxseg"#nf#"ei64.v">;
 
     def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                              LSWidth8, "vloxseg"#nf#"ei8.v">;
@@ -1097,8 +1101,6 @@
                               LSWidth16, "vloxseg"#nf#"ei16.v">;
     def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
                               LSWidth32, "vloxseg"#nf#"ei32.v">;
-    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
-                              LSWidth64, "vloxseg"#nf#"ei64.v">;
 
     def VSUXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                              LSWidth8, "vsuxseg"#nf#"ei8.v">;
@@ -1106,8 +1108,6 @@
                               LSWidth16, "vsuxseg"#nf#"ei16.v">;
     def VSUXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
                               LSWidth32, "vsuxseg"#nf#"ei32.v">;
-    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
-                              LSWidth64, "vsuxseg"#nf#"ei64.v">;
 
     def VSOXSEG#nf#EI8_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                              LSWidth8, "vsoxseg"#nf#"ei8.v">;
@@ -1115,10 +1115,20 @@
                               LSWidth16, "vsoxseg"#nf#"ei16.v">;
     def VSOXSEG#nf#EI32_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                               LSWidth32, "vsoxseg"#nf#"ei32.v">;
+  } // Predicates = [HasStdExtZvlsseg]
+
+  // Vector Indexed Segment Instructions with EEW=64 for index values
+  let Predicates = [HasStdExtZvlsseg, IsRV64] in {
+    def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedUnord,
+                              LSWidth64, "vluxseg"#nf#"ei64.v">;
+    def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad<!add(nf, -1), MOPLDIndexedOrder,
+                              LSWidth64, "vloxseg"#nf#"ei64.v">;
+    def VSUXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedUnord,
+                              LSWidth64, "vsuxseg"#nf#"ei64.v">;
     def VSOXSEG#nf#EI64_V : VIndexedSegmentStore<!add(nf, -1), MOPSTIndexedOrder,
                               LSWidth64, "vsoxseg"#nf#"ei64.v">;
-  }
-} // Predicates = [HasStdExtZvlsseg]
+  } // Predicates = [HasStdExtZvlsseg, IsRV64]
+}
 
 let Predicates = [HasStdExtZvamo, HasStdExtA] in {
   defm VAMOSWAPEI8 : VAMO<AMOOPVamoSwap, LSWidth8, "vamoswapei8.v">;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-gather.ll
@@ -1044,8 +1044,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf8 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1070,8 +1072,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf8 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1121,8 +1125,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf4 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1147,8 +1153,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf4 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1197,8 +1205,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf2 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1223,8 +1233,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf2 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1248,8 +1260,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v28, v8, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1917,8 +1931,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf8 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1943,8 +1959,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf8 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -1994,8 +2012,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf4 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -2020,8 +2040,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf4 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -2070,8 +2092,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf2 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -2096,8 +2120,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf2 v28, v8
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
@@ -2121,8 +2147,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v28, v8, 3
-; RV32-NEXT:    vsetvli zero, zero, e64, m4, tu, mu
-; RV32-NEXT:    vloxei64.v v12, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, tu, mu
+; RV32-NEXT:    vloxei32.v v12, (a0), v26, v0.t
 ; RV32-NEXT:    vmv4r.v v8, v12
 ; RV32-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-masked-scatter.ll
@@ -842,7 +842,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf8 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8i64:
@@ -864,7 +867,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf8 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8i64:
@@ -908,7 +914,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf4 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8i64:
@@ -930,7 +939,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf4 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8i64:
@@ -973,7 +985,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf2 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8i64:
@@ -995,7 +1010,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf2 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8i64:
@@ -1016,7 +1034,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v28, v12, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_v8i64:
@@ -1615,7 +1636,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf8 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i8_v8f64:
@@ -1637,7 +1661,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf8 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i8_v8f64:
@@ -1681,7 +1708,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf4 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i16_v8f64:
@@ -1703,7 +1733,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf4 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i16_v8f64:
@@ -1746,7 +1779,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsext.vf2 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_v8i32_v8f64:
@@ -1768,7 +1804,10 @@
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vzext.vf2 v28, v12
 ; RV32-NEXT:    vsll.vi v28, v28, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_v8i32_v8f64:
@@ -1789,7 +1828,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
 ; RV32-NEXT:    vsll.vi v28, v12, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v28, v0.t
+; RV32-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
+; RV32-NEXT:    vnsrl.wi v26, v28, 0
+; RV32-NEXT:    vsetivli zero, 8, e64, m4, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v26, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_v8f64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll b/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll
@@ -0,0 +1,3924 @@
+; RUN: not --crash llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN:   < %s 2>&1 | FileCheck %s
+
+; CHECK: LLVM ERROR: The V extension does not support EEW=64 for index values when XLEN=32
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
+    <vscale x 8 x i8>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
+    <vscale x 1 x i16>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
+    <vscale x 2 x i16>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
+    <vscale x 4 x i16>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
+    <vscale x 8 x i16>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
+    <vscale x 1 x i32>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
+    <vscale x 2 x i32>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
+    <vscale x 4 x i32>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
+    <vscale x 8 x i32>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
+    <vscale x 1 x i64>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
+    <vscale x 2 x i64>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
+    <vscale x 4 x i64>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
+    <vscale x 8 x i64>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
+    <vscale x 1 x half>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
+    <vscale x 2 x half>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
+    <vscale x 4 x half>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
+    <vscale x 8 x half>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
+    <vscale x 1 x float>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
+    <vscale x 2 x float>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
+    <vscale x 4 x float>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
+    <vscale x 8 x float>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
+    <vscale x 1 x double>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
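+; The tests below cover the ordered indexed load intrinsic (vloxei) with
+; 64-bit index vectors, mirroring the vluxei tests above.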
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
+    <vscale x 8 x i8>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i8> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
+    <vscale x 1 x i16>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
+    <vscale x 2 x i16>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
+    <vscale x 4 x i16>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
+    <vscale x 8 x i16>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i16> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
+    <vscale x 1 x i32>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
+    <vscale x 2 x i32>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
+    <vscale x 4 x i32>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
+    <vscale x 8 x i32>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i32> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
+    <vscale x 1 x i64>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
+    <vscale x 2 x i64>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
+    <vscale x 4 x i64>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
+    <vscale x 8 x i64>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x i64> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
+    <vscale x 1 x half>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
+    <vscale x 2 x half>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
+    <vscale x 4 x half>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
+    <vscale x 8 x half>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x half> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
+    <vscale x 1 x float>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
+    <vscale x 2 x float>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
+    <vscale x 4 x float>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
+    <vscale x 8 x float>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x float> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
+    <vscale x 1 x double>* %0,
+    <vscale x 1 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 1 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double>* %0,
+    <vscale x 2 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 2 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double>* %0,
+    <vscale x 4 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 4 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double>* %0,
+    <vscale x 8 x i64> %1,
+    i32 %2)
+
+  ret <vscale x 8 x double> %a
+}
+
+declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret <vscale x 8 x double> %a
+}
+
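+; The tests below cover the ordered indexed store intrinsic (vsoxei) with
+; 64-bit index vectors, in unmasked and masked forms.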
+declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
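+; Descriptive note (added comment, not part of the autogenerated output): the
+; vsuxei (unordered indexed store) tests below mirror the vsoxei (ordered
+; indexed store) tests above, covering 64-bit index vectors for the integer and
+; floating-point element types, each in an unmasked and a masked form.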
+declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+  <vscale x 1 x i16>,
+  <vscale x 1 x i16>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
+    <vscale x 1 x i16> %0,
+    <vscale x 1 x i16>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+  <vscale x 2 x i16>,
+  <vscale x 2 x i16>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
+    <vscale x 2 x i16> %0,
+    <vscale x 2 x i16>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+  <vscale x 4 x i16>,
+  <vscale x 4 x i16>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
+    <vscale x 4 x i16> %0,
+    <vscale x 4 x i16>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+  <vscale x 8 x i16>,
+  <vscale x 8 x i16>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
+    <vscale x 8 x i16> %0,
+    <vscale x 8 x i16>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+  <vscale x 1 x i32>,
+  <vscale x 1 x i32>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
+    <vscale x 1 x i32> %0,
+    <vscale x 1 x i32>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+  <vscale x 2 x i32>,
+  <vscale x 2 x i32>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
+    <vscale x 2 x i32> %0,
+    <vscale x 2 x i32>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+  <vscale x 4 x i32>,
+  <vscale x 4 x i32>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
+    <vscale x 4 x i32> %0,
+    <vscale x 4 x i32>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+  <vscale x 8 x i32>,
+  <vscale x 8 x i32>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
+    <vscale x 8 x i32> %0,
+    <vscale x 8 x i32>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+  <vscale x 1 x i64>,
+  <vscale x 1 x i64>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
+    <vscale x 1 x i64> %0,
+    <vscale x 1 x i64>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+  <vscale x 2 x i64>,
+  <vscale x 2 x i64>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
+    <vscale x 2 x i64> %0,
+    <vscale x 2 x i64>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+  <vscale x 4 x i64>,
+  <vscale x 4 x i64>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
+    <vscale x 4 x i64> %0,
+    <vscale x 4 x i64>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+  <vscale x 8 x i64>,
+  <vscale x 8 x i64>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
+    <vscale x 8 x i64> %0,
+    <vscale x 8 x i64>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+  <vscale x 1 x half>,
+  <vscale x 1 x half>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
+    <vscale x 1 x half> %0,
+    <vscale x 1 x half>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+  <vscale x 2 x half>,
+  <vscale x 2 x half>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
+    <vscale x 2 x half> %0,
+    <vscale x 2 x half>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+  <vscale x 4 x half>,
+  <vscale x 4 x half>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
+    <vscale x 4 x half> %0,
+    <vscale x 4 x half>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+  <vscale x 8 x half>,
+  <vscale x 8 x half>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
+    <vscale x 8 x half> %0,
+    <vscale x 8 x half>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+  <vscale x 1 x float>,
+  <vscale x 1 x float>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
+    <vscale x 1 x float> %0,
+    <vscale x 1 x float>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+  <vscale x 2 x float>,
+  <vscale x 2 x float>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+  <vscale x 1 x double>,
+  <vscale x 1 x double>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
+    <vscale x 1 x double> %0,
+    <vscale x 1 x double>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+  <vscale x 2 x double>,
+  <vscale x 2 x double>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  i32);
+
+define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i64>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i64> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mgather-sdnode.ll
@@ -1050,8 +1050,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1076,8 +1078,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1127,8 +1131,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1153,8 +1159,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1203,8 +1211,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1229,8 +1239,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1254,8 +1266,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -1989,8 +2003,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2015,8 +2031,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf8 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2066,8 +2084,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2092,8 +2112,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf4 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2142,8 +2164,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2168,8 +2192,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf2 v24, v8
 ; RV32-NEXT:    vsll.vi v8, v24, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
@@ -2193,8 +2219,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v8, v8, 3
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v8, 0
 ; RV32-NEXT:    vsetvli zero, zero, e64, m8, tu, mu
-; RV32-NEXT:    vloxei64.v v16, (a0), v8, v0.t
+; RV32-NEXT:    vloxei32.v v16, (a0), v28, v0.t
 ; RV32-NEXT:    vmv8r.v v8, v16
 ; RV32-NEXT:    ret
 ;
diff --git a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
--- a/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/mscatter-sdnode.ll
@@ -842,7 +842,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8i64:
@@ -864,7 +867,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8i64:
@@ -908,7 +914,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8i64:
@@ -930,7 +939,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8i64:
@@ -973,7 +985,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8i64:
@@ -995,7 +1010,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8i64:
@@ -1016,7 +1034,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_nxv8i64:
@@ -1615,7 +1636,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i8_nxv8f64:
@@ -1637,7 +1661,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf8 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i8_nxv8f64:
@@ -1681,7 +1708,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i16_nxv8f64:
@@ -1703,7 +1733,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf4 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i16_nxv8f64:
@@ -1746,7 +1779,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_sext_nxv8i32_nxv8f64:
@@ -1768,7 +1804,10 @@
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vzext.vf2 v24, v16
 ; RV32-NEXT:    vsll.vi v16, v24, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_zext_nxv8i32_nxv8f64:
@@ -1789,7 +1828,10 @@
 ; RV32:       # %bb.0:
 ; RV32-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
 ; RV32-NEXT:    vsll.vi v16, v16, 3
-; RV32-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
+; RV32-NEXT:    vsetvli zero, zero, e32, m4, ta, mu
+; RV32-NEXT:    vnsrl.wi v28, v16, 0
+; RV32-NEXT:    vsetvli zero, zero, e64, m8, ta, mu
+; RV32-NEXT:    vsoxei32.v v8, (a0), v28, v0.t
 ; RV32-NEXT:    ret
 ;
 ; RV64-LABEL: mscatter_baseidx_nxv8f64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll
@@ -1,1257 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i64(
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.nxv2i8.nxv2i64(
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.nxv4i8.nxv4i64(
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.nxv8i8.nxv8i64(
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.nxv1i16.nxv1i64(
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.nxv2i16.nxv2i64(
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.nxv4i16.nxv4i64(
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.nxv8i16.nxv8i64(
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.nxv2i32.nxv2i64(
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.nxv4i32.nxv4i64(
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei64.v v28, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.nxv8i32.nxv8i64(
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.nxv1i64.nxv1i64(
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.nxv2i64.nxv2i64(
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.nxv4i64.nxv4i64(
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.nxv8i64.nxv8i64(
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.nxv1f16.nxv1i64(
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.nxv2f16.nxv2i64(
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.nxv4f16.nxv4i64(
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.nxv8f16.nxv8i64(
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.nxv1f32.nxv1i64(
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.nxv2f32.nxv2i64(
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.nxv4f32.nxv4i64(
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vloxei64.v v28, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.nxv8f32.nxv8i64(
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.nxv1f64.nxv1i64(
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.nxv2f64.nxv2i64(
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.nxv4f64.nxv4i64(
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.nxv8f64.nxv8i64(
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vloxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}
 
 declare <vscale x 1 x i8> @llvm.riscv.vloxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll
@@ -1,1257 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i64(
-    <vscale x 1 x i8>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i8> @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i8> @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.nxv2i8.nxv2i64(
-    <vscale x 2 x i8>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i8> @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i8> @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.nxv4i8.nxv4i64(
-    <vscale x 4 x i8>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i8> @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i8> @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.nxv8i8.nxv8i64(
-    <vscale x 8 x i8>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i8> @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i8> @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i8> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.nxv1i16.nxv1i64(
-    <vscale x 1 x i16>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i16> @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i16> @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.nxv2i16.nxv2i64(
-    <vscale x 2 x i16>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i16> @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i16> @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.nxv4i16.nxv4i64(
-    <vscale x 4 x i16>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i16> @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i16> @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.nxv8i16.nxv8i64(
-    <vscale x 8 x i16>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i16> @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i16> @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i16> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.nxv1i32.nxv1i64(
-    <vscale x 1 x i32>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i32> @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i32> @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.nxv2i32.nxv2i64(
-    <vscale x 2 x i32>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i32> @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i32> @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.nxv4i32.nxv4i64(
-    <vscale x 4 x i32>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i32> @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i32> @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei64.v v28, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.nxv8i32.nxv8i64(
-    <vscale x 8 x i32>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i32> @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i32> @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i32> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.nxv1i64.nxv1i64(
-    <vscale x 1 x i64>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x i64> @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x i64> @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.nxv2i64.nxv2i64(
-    <vscale x 2 x i64>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x i64> @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x i64> @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.nxv4i64.nxv4i64(
-    <vscale x 4 x i64>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x i64> @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x i64> @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.nxv8i64.nxv8i64(
-    <vscale x 8 x i64>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x i64> @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x i64> @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x i64> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.nxv1f16.nxv1i64(
-    <vscale x 1 x half>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x half> @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x half> @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.nxv2f16.nxv2i64(
-    <vscale x 2 x half>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x half> @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x half> @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.nxv4f16.nxv4i64(
-    <vscale x 4 x half>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x half> @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x half> @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.nxv8f16.nxv8i64(
-    <vscale x 8 x half>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x half> @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x half> @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x half> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.nxv1f32.nxv1i64(
-    <vscale x 1 x float>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x float> @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x float> @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v25, (a0), v8
-; CHECK-NEXT:    vmv1r.v v8, v25
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.nxv2f32.nxv2i64(
-    <vscale x 2 x float>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x float> @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x float> @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v26, (a0), v8
-; CHECK-NEXT:    vmv2r.v v8, v26
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.nxv4f32.nxv4i64(
-    <vscale x 4 x float>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x float> @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x float> @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vluxei64.v v28, (a0), v8
-; CHECK-NEXT:    vmv4r.v v8, v28
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.nxv8f32.nxv8i64(
-    <vscale x 8 x float>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x float> @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x float> @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x float> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.nxv1f64.nxv1i64(
-    <vscale x 1 x double>* %0,
-    <vscale x 1 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define <vscale x 1 x double> @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 1 x double> @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 1 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double>* %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.nxv2f64.nxv2i64(
-    <vscale x 2 x double>* %0,
-    <vscale x 2 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define <vscale x 2 x double> @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 2 x double> @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 2 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double>* %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.nxv4f64.nxv4i64(
-    <vscale x 4 x double>* %0,
-    <vscale x 4 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define <vscale x 4 x double> @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 4 x double> @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 4 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double>* %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v8
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.nxv8f64.nxv8i64(
-    <vscale x 8 x double>* %0,
-    <vscale x 8 x i64> %1,
-    i32 %2)
-
-  ret <vscale x 8 x double> %a
-}
-
-declare <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define <vscale x 8 x double> @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, tu, mu
-; CHECK-NEXT:    vluxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  %a = call <vscale x 8 x double> @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret <vscale x 8 x double> %a
-}
 
 declare <vscale x 1 x i8> @llvm.riscv.vluxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>*,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll
@@ -1,1294 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
-declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsoxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
 declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
--- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll
@@ -1,1294 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
 ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
 ; RUN:   < %s | FileCheck %s
-declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
-  <vscale x 1 x i8>,
-  <vscale x 1 x i8>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64(
-    <vscale x 1 x i8> %0,
-    <vscale x 1 x i8>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
-  <vscale x 2 x i8>,
-  <vscale x 2 x i8>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64(
-    <vscale x 2 x i8> %0,
-    <vscale x 2 x i8>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
-  <vscale x 4 x i8>,
-  <vscale x 4 x i8>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64(
-    <vscale x 4 x i8> %0,
-    <vscale x 4 x i8>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
-  <vscale x 8 x i8>,
-  <vscale x 8 x i8>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64(
-    <vscale x 8 x i8> %0,
-    <vscale x 8 x i8>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
-  <vscale x 1 x i16>,
-  <vscale x 1 x i16>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64(<vscale x 1 x i16> %0, <vscale x 1 x i16>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64(
-    <vscale x 1 x i16> %0,
-    <vscale x 1 x i16>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
-  <vscale x 2 x i16>,
-  <vscale x 2 x i16>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64(<vscale x 2 x i16> %0, <vscale x 2 x i16>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64(
-    <vscale x 2 x i16> %0,
-    <vscale x 2 x i16>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
-  <vscale x 4 x i16>,
-  <vscale x 4 x i16>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64(<vscale x 4 x i16> %0, <vscale x 4 x i16>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64(
-    <vscale x 4 x i16> %0,
-    <vscale x 4 x i16>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
-  <vscale x 8 x i16>,
-  <vscale x 8 x i16>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64(<vscale x 8 x i16> %0, <vscale x 8 x i16>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64(
-    <vscale x 8 x i16> %0,
-    <vscale x 8 x i16>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
-  <vscale x 1 x i32>,
-  <vscale x 1 x i32>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32> %0, <vscale x 1 x i32>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64(
-    <vscale x 1 x i32> %0,
-    <vscale x 1 x i32>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
-  <vscale x 2 x i32>,
-  <vscale x 2 x i32>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64(<vscale x 2 x i32> %0, <vscale x 2 x i32>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64(
-    <vscale x 2 x i32> %0,
-    <vscale x 2 x i32>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
-  <vscale x 4 x i32>,
-  <vscale x 4 x i32>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64(<vscale x 4 x i32> %0, <vscale x 4 x i32>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64(
-    <vscale x 4 x i32> %0,
-    <vscale x 4 x i32>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
-  <vscale x 8 x i32>,
-  <vscale x 8 x i32>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64(<vscale x 8 x i32> %0, <vscale x 8 x i32>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64(
-    <vscale x 8 x i32> %0,
-    <vscale x 8 x i32>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
-  <vscale x 1 x i64>,
-  <vscale x 1 x i64>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64(
-    <vscale x 1 x i64> %0,
-    <vscale x 1 x i64>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
-  <vscale x 2 x i64>,
-  <vscale x 2 x i64>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64(
-    <vscale x 2 x i64> %0,
-    <vscale x 2 x i64>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
-  <vscale x 4 x i64>,
-  <vscale x 4 x i64>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64(
-    <vscale x 4 x i64> %0,
-    <vscale x 4 x i64>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
-  <vscale x 8 x i64>,
-  <vscale x 8 x i64>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64(
-    <vscale x 8 x i64> %0,
-    <vscale x 8 x i64>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
-  <vscale x 1 x half>,
-  <vscale x 1 x half>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64(<vscale x 1 x half> %0, <vscale x 1 x half>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64(
-    <vscale x 1 x half> %0,
-    <vscale x 1 x half>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
-  <vscale x 2 x half>,
-  <vscale x 2 x half>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64(<vscale x 2 x half> %0, <vscale x 2 x half>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64(
-    <vscale x 2 x half> %0,
-    <vscale x 2 x half>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
-  <vscale x 4 x half>,
-  <vscale x 4 x half>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64(<vscale x 4 x half> %0, <vscale x 4 x half>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64(
-    <vscale x 4 x half> %0,
-    <vscale x 4 x half>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
-  <vscale x 8 x half>,
-  <vscale x 8 x half>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64(<vscale x 8 x half> %0, <vscale x 8 x half>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64(
-    <vscale x 8 x half> %0,
-    <vscale x 8 x half>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
-  <vscale x 1 x float>,
-  <vscale x 1 x float>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64(<vscale x 1 x float> %0, <vscale x 1 x float>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64(
-    <vscale x 1 x float> %0,
-    <vscale x 1 x float>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
-  <vscale x 2 x float>,
-  <vscale x 2 x float>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64(<vscale x 2 x float> %0, <vscale x 2 x float>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64(
-    <vscale x 2 x float> %0,
-    <vscale x 2 x float>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
-  <vscale x 4 x float>,
-  <vscale x 4 x float>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64(
-    <vscale x 4 x float> %0,
-    <vscale x 4 x float>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
-  <vscale x 8 x float>,
-  <vscale x 8 x float>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64(
-    <vscale x 8 x float> %0,
-    <vscale x 8 x float>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
-  <vscale x 1 x double>,
-  <vscale x 1 x double>*,
-  <vscale x 1 x i64>,
-  <vscale x 1 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v9, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64(
-    <vscale x 1 x double> %0,
-    <vscale x 1 x double>* %1,
-    <vscale x 1 x i64> %2,
-    <vscale x 1 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
-  <vscale x 2 x double>,
-  <vscale x 2 x double>*,
-  <vscale x 2 x i64>,
-  <vscale x 2 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v10, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64(
-    <vscale x 2 x double> %0,
-    <vscale x 2 x double>* %1,
-    <vscale x 2 x i64> %2,
-    <vscale x 2 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK:       # %bb.0: # %entry
-; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT:    vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT:    ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
diff --git a/llvm/test/MC/RISCV/rvv/invalid-eew.s b/llvm/test/MC/RISCV/rvv/invalid-eew.s
new file mode 100644
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/invalid-eew.s
@@ -0,0 +1,195 @@
+# RUN: not llvm-mc -triple=riscv32 --mattr=+experimental-v \
+# RUN:             --mattr=+experimental-zvlsseg %s 2>&1 \
+# RUN:        | FileCheck %s --check-prefix=CHECK-ERROR
+
+vluxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set