diff --git a/clang/include/clang/Basic/riscv_vector.td b/clang/include/clang/Basic/riscv_vector.td --- a/clang/include/clang/Basic/riscv_vector.td +++ b/clang/include/clang/Basic/riscv_vector.td @@ -205,7 +205,7 @@ string HeaderCode = ""; // Sub extension of vector spec. Currently only support Zvamo or Zvlsseg. - string RequiredExtension = ""; + list<string> RequiredExtensions = []; // Number of fields for Zvlsseg. int NF = 1; @@ -673,7 +673,7 @@ Ops[1] = Builder.CreateBitCast(Ops[1], ResultType->getPointerTo()); }] in { foreach type = TypeList in { - foreach eew_list = EEWList in { + foreach eew_list = EEWList[0-2] in { defvar eew = eew_list[0]; defvar eew_type = eew_list[1]; let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in { @@ -683,6 +683,15 @@ } } } + defvar eew64 = "64"; + defvar eew64_type = "(Log2EEW:6)"; + let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask", + RequiredExtensions = ["RV64"] in { + def : RVVBuiltin<"v", "vPCe" # eew64_type # "Uv", type>; + if !not(IsFloat<type>.val) then { + def : RVVBuiltin<"Uv", "UvPCUe" # eew64_type # "Uv", type>; + } + } } } } @@ -760,7 +769,7 @@ IntrinsicTypes = {Ops[0]->getType(), Ops[2]->getType(), Ops[4]->getType()}; }] in { foreach type = TypeList in { - foreach eew_list = EEWList in { + foreach eew_list = EEWList[0-2] in { defvar eew = eew_list[0]; defvar eew_type = eew_list[1]; let Name = op # eew # "_v", IRName = op, IRNameMask = op # "_mask" in { @@ -770,6 +779,15 @@ } } } + defvar eew64 = "64"; + defvar eew64_type = "(Log2EEW:6)"; + let Name = op # eew64 # "_v", IRName = op, IRNameMask = op # "_mask", + RequiredExtensions = ["RV64"] in { + def : RVVBuiltin<"v", "0Pe" # eew64_type # "Uvv", type>; + if !not(IsFloat<type>.val) then { + def : RVVBuiltin<"Uv", "0PUe" # eew64_type # "UvUv", type>; + } + } } } } @@ -1247,13 +1265,13 @@ defm vle64ff: RVVVLEFFBuiltin<["l", "d"]>; // 7.8 Vector Load/Store Segment Instructions -let RequiredExtension = "Zvlsseg" in { +let RequiredExtensions = ["Zvlsseg"] in { defm : RVVUnitStridedSegLoad<"vlseg">; defm : RVVUnitStridedSegLoadFF<"vlseg">; } // 8. Vector AMO Operations
-let RequiredExtension = "Zvamo" in { +let RequiredExtensions = ["Zvamo"] in { defm vamoswap : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true, /* hasFP */ true>; defm vamoadd : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>; defm vamoxor : RVVAMOBuiltinSet< /* hasSigned */ true, /* hasUnsigned */ true>; diff --git a/clang/utils/TableGen/RISCVVEmitter.cpp b/clang/utils/TableGen/RISCVVEmitter.cpp --- a/clang/utils/TableGen/RISCVVEmitter.cpp +++ b/clang/utils/TableGen/RISCVVEmitter.cpp @@ -142,6 +142,7 @@ Zfh = 1 << 3, Zvamo = 1 << 4, Zvlsseg = 1 << 5, + RV64 = 1 << 6, }; // TODO refactor RVVIntrinsic class design after support all intrinsic @@ -175,7 +176,7 @@ bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &Types, const std::vector<int64_t> &IntrinsicTypes, - StringRef RequiredExtension, unsigned NF); + const std::vector<StringRef> RequiredExtensions, unsigned NF); ~RVVIntrinsic() = default; StringRef getName() const { return Name; } @@ -760,7 +761,8 @@ bool HasNoMaskedOverloaded, bool HasAutoDef, StringRef ManualCodegen, const RVVTypes &OutInTypes, const std::vector<int64_t> &NewIntrinsicTypes, - StringRef RequiredExtension, unsigned NF) + const std::vector<StringRef> RequiredExtensions, + unsigned NF) : IRName(IRName), HasSideEffects(HasSideEffects), IsMask(IsMask), HasMaskedOffOperand(HasMaskedOffOperand), HasVL(HasVL), HasNoMaskedOverloaded(HasNoMaskedOverloaded), HasAutoDef(HasAutoDef), @@ -788,10 +790,14 @@ else if (T->isFloatVector(64) || T->isFloat(64)) RISCVExtensions |= RISCVExtension::D; } - if (RequiredExtension == "Zvamo") - RISCVExtensions |= RISCVExtension::Zvamo; - if (RequiredExtension == "Zvlsseg") - RISCVExtensions |= RISCVExtension::Zvlsseg; + for (auto Extension : RequiredExtensions) { + if (Extension == "Zvamo") + RISCVExtensions |= RISCVExtension::Zvamo; + if (Extension == "Zvlsseg") + RISCVExtensions |= RISCVExtension::Zvlsseg; + if (Extension == "RV64") + RISCVExtensions |= RISCVExtension::RV64; + } // Init OutputType and InputTypes OutputType = OutInTypes[0]; @@ -1091,7 +1097,8 @@ StringRef ManualCodegenMask = R->getValueAsString("ManualCodegenMask"); std::vector<int64_t> IntrinsicTypes = R->getValueAsListOfInts("IntrinsicTypes"); - StringRef RequiredExtension = R->getValueAsString("RequiredExtension"); + std::vector<StringRef> RequiredExtensions = + R->getValueAsListOfStrings("RequiredExtensions"); StringRef IRName = R->getValueAsString("IRName"); StringRef IRNameMask = R->getValueAsString("IRNameMask"); unsigned NF = R->getValueAsInt("NF"); @@ -1159,7 +1166,7 @@ Name, SuffixStr, MangledName, MangledSuffixStr, IRName, HasSideEffects, /*IsMask=*/false, /*HasMaskedOffOperand=*/false, HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegen, - Types.getValue(), IntrinsicTypes, RequiredExtension, NF)); + Types.getValue(), IntrinsicTypes, RequiredExtensions, NF)); if (HasMask) { // Create a mask intrinsic Optional<RVVTypes> MaskTypes = @@ -1168,7 +1175,7 @@ Name, SuffixStr, MangledName, MangledSuffixStr, IRNameMask, HasSideEffects, /*IsMask=*/true, HasMaskedOffOperand, HasVL, HasNoMaskedOverloaded, HasAutoDef, ManualCodegenMask, - MaskTypes.getValue(), IntrinsicTypes, RequiredExtension, NF)); + MaskTypes.getValue(), IntrinsicTypes, RequiredExtensions, NF)); } } // end for Log2LMULList } // end for TypeRange @@ -1249,6 +1256,8 @@ OS << LS << "defined(__riscv_zvamo)"; if (Extents & RISCVExtension::Zvlsseg) OS << LS << "defined(__riscv_zvlsseg)"; + if (Extents & RISCVExtension::RV64) + OS << LS << "(__riscv_xlen == 64)"; OS << "\n"; return true; }
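For reference, the new RV64 bit only changes the preprocessor guard that RISCVVEmitter.cpp wraps around the affected builtin declarations. A rough sketch of the resulting guard shape, assuming the emitter joins the individual extension tests with "&&" as it does for the existing macros (the guarded declarations themselves are elided and illustrative, not copied from a generated riscv_vector.h):

/* Sketch only: builtins whose RequiredExtensions include "RV64" (for example
 * the f64 vluxei64/vloxei64 forms, which also require the D extension through
 * their element type) would end up behind a guard of roughly this shape. */
#if defined(__riscv_d) && (__riscv_xlen == 64)
/* ... vluxei64/vloxei64 builtin declarations ... */
#endif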
diff --git a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp --- a/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp +++ b/llvm/lib/Target/RISCV/RISCVISelDAGToDAG.cpp @@ -345,6 +345,10 @@ RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); + if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { + errs() << "The V extension does not support EEW=64 for index values " + "when XLEN=32\n"; + } const RISCV::VLXSEGPseudo *P = RISCV::getVLXSEGPseudo( NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); @@ -422,6 +426,10 @@ RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); + if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { + errs() << "The V extension does not support EEW=64 for index values " + "when XLEN=32\n"; + } const RISCV::VSXSEGPseudo *P = RISCV::getVSXSEGPseudo( NF, IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); @@ -888,6 +896,10 @@ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); + if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { + errs() << "The V extension does not support EEW=64 for index values " + "when XLEN=32\n"; + } const RISCV::VLX_VSXPseudo *P = RISCV::getVLXPseudo( IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); @@ -1077,6 +1089,10 @@ RISCVII::VLMUL LMUL = RISCVTargetLowering::getLMUL(VT); RISCVII::VLMUL IndexLMUL = RISCVTargetLowering::getLMUL(IndexVT); unsigned IndexLog2EEW = Log2_32(IndexVT.getScalarSizeInBits()); + if (IndexLog2EEW == 6 && !Subtarget->is64Bit()) { + errs() << "The V extension does not support EEW=64 for index values " + "when XLEN=32\n"; + } const RISCV::VLX_VSXPseudo *P = RISCV::getVSXPseudo( IsMasked, IsOrdered, IndexLog2EEW, static_cast(LMUL), static_cast(IndexLMUL)); diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td --- a/llvm/lib/Target/RISCV/RISCVInstrInfoV.td +++ b/llvm/lib/Target/RISCV/RISCVInstrInfoV.td @@ -492,23 +492,29 @@ def VLUXEI8_V : VIndexedLoad; def VLUXEI16_V : VIndexedLoad; def VLUXEI32_V : VIndexedLoad; -def VLUXEI64_V : VIndexedLoad; def VLOXEI8_V : VIndexedLoad; def VLOXEI16_V : VIndexedLoad; def VLOXEI32_V : VIndexedLoad; -def VLOXEI64_V : VIndexedLoad; def VSUXEI8_V : VIndexedStore; def VSUXEI16_V : VIndexedStore; def VSUXEI32_V : VIndexedStore; -def VSUXEI64_V : VIndexedStore; def VSOXEI8_V : VIndexedStore; def VSOXEI16_V : VIndexedStore; def VSOXEI32_V : VIndexedStore; +} // Predicates = [HasStdExtV] + +let Predicates = [HasStdExtV, IsRV64] in { +// Vector Indexed Instructions with EEW=64 for index values +def VLUXEI64_V : VIndexedLoad; +def VLOXEI64_V : VIndexedLoad; +def VSUXEI64_V : VIndexedStore; def VSOXEI64_V : VIndexedStore; +} // Predicates = [HasStdExtV, IsRV64] +let Predicates = [HasStdExtV] in { defm VL1R : VWholeLoad<0, "vl1r", VR>; defm VL2R : VWholeLoad<1, "vl2r", VRM2>; defm VL4R : VWholeLoad<3, "vl4r", VRM4>; @@ -1053,8 +1059,8 @@ } // hasSideEffects = 0, mayLoad = 0, mayStore = 0 } // Predicates = [HasStdExtV] -let Predicates = [HasStdExtZvlsseg] in { - foreach nf=2-8 in { +foreach nf=2-8 in { + let Predicates = [HasStdExtZvlsseg] in { def VLSEG#nf#E8_V : VUnitStrideSegmentLoad; def VLSEG#nf#E16_V : VUnitStrideSegmentLoad; def VLSEG#nf#E32_V : 
VUnitStrideSegmentLoad; @@ -1088,8 +1094,6 @@ LSWidth16, "vluxseg"#nf#"ei16.v">; def VLUXSEG#nf#EI32_V : VIndexedSegmentLoad; - def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad; def VLOXSEG#nf#EI8_V : VIndexedSegmentLoad; @@ -1097,8 +1101,6 @@ LSWidth16, "vloxseg"#nf#"ei16.v">; def VLOXSEG#nf#EI32_V : VIndexedSegmentLoad; - def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad; def VSUXSEG#nf#EI8_V : VIndexedSegmentStore; @@ -1106,8 +1108,6 @@ LSWidth16, "vsuxseg"#nf#"ei16.v">; def VSUXSEG#nf#EI32_V : VIndexedSegmentStore; - def VSUXSEG#nf#EI64_V : VIndexedSegmentStore; def VSOXSEG#nf#EI8_V : VIndexedSegmentStore; @@ -1115,10 +1115,21 @@ LSWidth16, "vsoxseg"#nf#"ei16.v">; def VSOXSEG#nf#EI32_V : VIndexedSegmentStore; + } // Predicates = [HasStdExtZvlsseg] + + // Vector Indexed Instructions with EEW=64 for index values + let Predicates = [HasStdExtZvlsseg, IsRV64] in { + def VLUXSEG#nf#EI64_V : VIndexedSegmentLoad; + def VLOXSEG#nf#EI64_V : VIndexedSegmentLoad; + def VSUXSEG#nf#EI64_V : VIndexedSegmentStore; def VSOXSEG#nf#EI64_V : VIndexedSegmentStore; - } -} // Predicates = [HasStdExtZvlsseg] + } // Predicates = [HasStdExtZvlsseg, IsRV64] +} + let Predicates = [HasStdExtZvamo, HasStdExtA] in { defm VAMOSWAPEI8 : VAMO; diff --git a/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll b/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/invalid-eew.ll @@ -0,0 +1,4146 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ +; RUN: < %s 2>&1 | FileCheck %s + +declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( + %0, + * 
%1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( + *, + , + i32); + +define 
@intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 
for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f16.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} 
+ +declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i32); + +define 
@intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv1f64.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( + *, + , + i32); + +define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support 
EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + 
+declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, 
%1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call 
@llvm.riscv.vloxei.nxv1f16.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i32); + +define 
@intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( + *, + , + i32); + +define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i32); + +define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i32); + +define void 
@intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void 
@llvm.riscv.vsoxei.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension 
does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( + , + *, + , + i32); + +define void 
@intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.nxv1i8.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + 
i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call 
void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V 
extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + , + *, + , + , + i32); + +define void 
@intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( + , + *, + , + i32); + +define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( + , + *, + , + , + i32); + +define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { +; CHECK: The V extension does not support EEW=64 for index values when XLEN=32 +entry: + call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vloxei-rv32.ll @@ -1,1257 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vloxei.nxv1i8.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i8.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64( 
%0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i8.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i8.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i8.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i8.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i8.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i8.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i16.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; 
CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i16.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i16.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i16.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i32.nxv1i64( - *, - , - i32); - 
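; NOTE: The scalable-vector type annotations in this hunk were lost in
; extraction; only the mangled intrinsic names remain. As a decoding aid,
; here is a minimal sketch of the removed declare/define pair that follows,
; assuming the types implied by the suffix .nxv1i32.nxv1i64: a
; <vscale x 1 x i32> result and base pointer element type, a
; <vscale x 1 x i64> index vector, and an i32 VL operand on riscv32. The
; exact type spellings are inferred from the name, not read from the hunk;
; the FileCheck lines are unchanged from those shown in the deleted text.

declare <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
  <vscale x 1 x i32>*,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i32> @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(<vscale x 1 x i32>* %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
entry:
  ; Indexed (ordered) load: base pointer in %0, 64-bit indices in %1, VL in %2.
  %a = call <vscale x 1 x i32> @llvm.riscv.vloxei.nxv1i32.nxv1i64(
    <vscale x 1 x i32>* %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}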
-define @intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i32.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i32.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i32.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; 
CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1i64.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1i64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2i64.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2i64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4i64.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4i64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8i64.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8i64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define 
@intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f16.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f16.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f16.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f16.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, 
a1, e16, m2, ta, mu -; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f32.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f32.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vloxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f32.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vloxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - 
i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f32.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vloxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv1f64.nxv1i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv1f64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv2f64.nxv2i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv2f64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv4f64.nxv4i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv4f64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vloxei.nxv8f64.nxv8i64( - *, - , - i32); - -define @intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vloxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.nxv8f64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vloxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu -; CHECK-NEXT: vloxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vloxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} declare @llvm.riscv.vloxei.nxv1i8.nxv1i32( *, diff --git a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vluxei-rv32.ll @@ -1,1257 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare @llvm.riscv.vluxei.nxv1i8.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i8.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i8.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i8.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i8.nxv4i64( - *, - 
, - i32); - -define @intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i8.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i8.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i8.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i16.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i16.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: 
vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i16.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i16.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i32.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i32.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i32.nxv2i64( - * %0, - %1, - i32 %2) - - ret 
%a -} - -declare @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i32.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i32.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1i64.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1i64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2i64.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2i64_nxv2i64_nxv2i64: -; 
CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2i64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4i64.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4i64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8i64.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8i64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f16.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1f16.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare 
@llvm.riscv.vluxei.nxv2f16.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f16.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f16.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f16.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f16.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f16.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f32.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv1f32.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: 
intrinsic_vluxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2f32.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vluxei64.v v25, (a0), v8 -; CHECK-NEXT: vmv1r.v v8, v25 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f32.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f32.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vluxei64.v v26, (a0), v8 -; CHECK-NEXT: vmv2r.v v8, v26 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f32.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f32.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vluxei64.v v28, (a0), v8 -; CHECK-NEXT: vmv4r.v v8, v28 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f32.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv1f64.nxv1i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call 
@llvm.riscv.vluxei.nxv1f64.nxv1i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv2f64.nxv2i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv2f64.nxv2i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv4f64.nxv4i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv4f64.nxv4i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} - -declare @llvm.riscv.vluxei.nxv8f64.nxv8i64( - *, - , - i32); - -define @intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i32 %2) nounwind { -; CHECK-LABEL: intrinsic_vluxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v8 -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.nxv8f64.nxv8i64( - * %0, - %1, - i32 %2) - - ret %a -} - -declare @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32); - -define @intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vluxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, tu, mu -; CHECK-NEXT: vluxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - %a = call @llvm.riscv.vluxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret %a -} declare @llvm.riscv.vluxei.nxv1i8.nxv1i32( *, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll +++ 
b/llvm/test/CodeGen/RISCV/rvv/vsoxei-rv32.ll @@ -1,1294 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i8.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i8.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i8.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i8.nxv8i64( - %0, - * 
%1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - , - *, - , - i32); - -define void 
@intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1i64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2i64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4i64.nxv4i64( 
- %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8i64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - , - *, - , - i32); - -define void 
@intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # 
%entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv1f64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv2f64.nxv2i64( 
- %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv4f64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv4f64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.nxv8f64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsoxei_mask_v_nxv8f64_nxv8f64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsoxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsoxei.mask.nxv8f64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - declare void @llvm.riscv.vsoxei.nxv1i8.nxv1i32( , *, diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll --- a/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll +++ b/llvm/test/CodeGen/RISCV/rvv/vsuxei-rv32.ll @@ -1,1294 +1,6 @@ ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py ; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \ ; RUN: < %s | FileCheck %s -declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i8.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - , - *, - , - , - i32); - -define void 
@intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i8_nxv1i8_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i8.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i8.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i8_nxv2i8_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i8.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i8.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i8_nxv4i8_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i8.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i8.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i8_nxv8i8_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e8, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i8.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, 
e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i16_nxv1i16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i16_nxv2i16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i16_nxv4i16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i16_nxv8i16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i16.nxv8i64( - %0, - * %1, - %2, 
- %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i32_nxv1i32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i32_nxv2i32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i32_nxv4i32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * 
%1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i32_nxv8i32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1i64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1i64_nxv1i64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1i64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2i64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2i64_nxv2i64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2i64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4i64.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4i64_nxv4i64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4i64.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, 
mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8i64.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8i64_nxv8i64_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8i64.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f16.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f16_nxv1f16_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f16.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f16.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f16_nxv2f16_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f16.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4f16.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f16_nxv4f16_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4f16.nxv4i64( - %0, - * %1, - %2, - %3, - i32 
%4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8f16.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f16_nxv8f16_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e16, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8f16.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f32.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f32_nxv1f32_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, mf2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f32.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f32.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f32_nxv2f32_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f32.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv4f32.nxv4i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, 
i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f32_nxv4f32_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv4f32.nxv4i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv8f32.nxv8i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f32_nxv8f32_nxv8i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e32, m4, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv8f32.nxv8i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv1f64.nxv1i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv1f64_nxv1f64_nxv1i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m1, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v9, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv1f64.nxv1i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10 -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.nxv2f64.nxv2i64( - %0, - * %1, - %2, - i32 %3) - - ret void -} - -declare void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - , - *, - , - , - i32); - -define void @intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i32 %4) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv2f64_nxv2f64_nxv2i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m2, ta, mu -; CHECK-NEXT: vsuxei64.v v8, (a0), v10, v0.t -; CHECK-NEXT: ret -entry: - call void @llvm.riscv.vsuxei.mask.nxv2f64.nxv2i64( - %0, - * %1, - %2, - %3, - i32 %4) - - ret void -} - -declare void @llvm.riscv.vsuxei.nxv4f64.nxv4i64( - , - *, - , - i32); - -define void @intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i32 %3) nounwind { -; CHECK-LABEL: intrinsic_vsuxei_v_nxv4f64_nxv4f64_nxv4i64: -; CHECK: # %bb.0: # %entry -; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu -; 
CHECK-NEXT: vsuxei64.v v8, (a0), v12
-; CHECK-NEXT: ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
-  <vscale x 4 x double>,
-  <vscale x 4 x double>*,
-  <vscale x 4 x i64>,
-  <vscale x 4 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv4f64_nxv4f64_nxv4i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m4, ta, mu
-; CHECK-NEXT: vsuxei64.v v8, (a0), v12, v0.t
-; CHECK-NEXT: ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv4f64.nxv4i64(
-    <vscale x 4 x double> %0,
-    <vscale x 4 x double>* %1,
-    <vscale x 4 x i64> %2,
-    <vscale x 4 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  i32);
-
-define void @intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, i32 %3) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsuxei64.v v8, (a0), v16
-; CHECK-NEXT: ret
-entry:
-  call void @llvm.riscv.vsuxei.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    i32 %3)
-
-  ret void
-}
-
-declare void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
-  <vscale x 8 x double>,
-  <vscale x 8 x double>*,
-  <vscale x 8 x i64>,
-  <vscale x 8 x i1>,
-  i32);
-
-define void @intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
-; CHECK-LABEL: intrinsic_vsuxei_mask_v_nxv8f64_nxv8f64_nxv8i64:
-; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: vsetvli zero, a1, e64, m8, ta, mu
-; CHECK-NEXT: vsuxei64.v v8, (a0), v16, v0.t
-; CHECK-NEXT: ret
-entry:
-  call void @llvm.riscv.vsuxei.mask.nxv8f64.nxv8i64(
-    <vscale x 8 x double> %0,
-    <vscale x 8 x double>* %1,
-    <vscale x 8 x i64> %2,
-    <vscale x 8 x i1> %3,
-    i32 %4)
-
-  ret void
-}
-
 declare void @llvm.riscv.vsuxei.nxv1i8.nxv1i32(
   <vscale x 1 x i8>,
   <vscale x 1 x i8>*,
diff --git a/llvm/test/MC/RISCV/rvv/invalid-eew.s b/llvm/test/MC/RISCV/rvv/invalid-eew.s
new file mode 100644
--- /dev/null
+++ b/llvm/test/MC/RISCV/rvv/invalid-eew.s
@@ -0,0 +1,195 @@
+# RUN: not llvm-mc -triple=riscv32 --mattr=+experimental-v \
+# RUN:   --mattr=+experimental-zvlsseg %s 2>&1 \
+# RUN:   | FileCheck %s --check-prefix=CHECK-ERROR
+
+vluxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxei64.v v24, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vluxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vloxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsuxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg2ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg3ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg4ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg5ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg6ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg7ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4, v0.t
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
+
+vsoxseg8ei64.v v8, (a0), v4
+# CHECK-ERROR: instruction requires the following: RV64I Base Instruction Set
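For reference only, a minimal sketch of the positive counterpart to this negative test, not part of the patch: the RUN invocation and plain CHECK prefixes below are assumptions modelled on the negative RUN line above, and under an rv64 triple the same EEW=64 indexed forms are expected to assemble without diagnostics.

# Hypothetical companion test (assumed flags); llvm-mc echoes the canonical
# assembly, which FileCheck matches with whitespace canonicalized.
# RUN: llvm-mc -triple=riscv64 --mattr=+experimental-v \
# RUN:   --mattr=+experimental-zvlsseg %s | FileCheck %s

vluxei64.v v8, (a0), v4, v0.t
# CHECK: vluxei64.v v8, (a0), v4, v0.t

vsoxseg8ei64.v v8, (a0), v4
# CHECK: vsoxseg8ei64.v v8, (a0), v4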