diff --git a/llvm/include/llvm/IR/IntrinsicsRISCV.td b/llvm/include/llvm/IR/IntrinsicsRISCV.td
--- a/llvm/include/llvm/IR/IntrinsicsRISCV.td
+++ b/llvm/include/llvm/IR/IntrinsicsRISCV.td
@@ -110,6 +110,21 @@
                      LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For indexed load
+  // Input: (pointer, index, vl)
+  class RISCVILoad
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<0>>, IntrReadMem]>, RISCVVIntrinsic;
+  // For indexed load with mask
+  // Input: (maskedoff, pointer, index, mask, vl)
+  class RISCVILoadMask
+        : Intrinsic<[llvm_anyvector_ty],
+                    [LLVMMatchType<0>,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrReadMem]>, RISCVVIntrinsic;
   // For unit stride store
   // Input: (vector_in, pointer, vl)
   class RISCVUSStore
@@ -143,6 +158,22 @@
                      LLVMPointerType<LLVMMatchType<0>>, llvm_anyint_ty,
                      LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, LLVMMatchType<1>],
                     [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For indexed store
+  // Input: (vector_in, pointer, index, vl)
+  class RISCVIStore
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>,
+                     llvm_anyvector_ty, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
+  // For indexed store with mask
+  // Input: (vector_in, pointer, index, mask, vl)
+  class RISCVIStoreMask
+        : Intrinsic<[],
+                    [llvm_anyvector_ty,
+                     LLVMPointerType<LLVMMatchType<0>>, llvm_anyvector_ty,
+                     LLVMScalarOrSameVectorWidth<0, llvm_i1_ty>, llvm_anyint_ty],
+                    [NoCapture<ArgIndex<1>>, IntrWriteMem]>, RISCVVIntrinsic;
   // For destination vector type is the same as first source vector.
   // Input: (vector_in, vector_in/scalar_in, vl)
   class RISCVBinaryAAXNoMask
@@ -237,6 +268,10 @@
     def "int_riscv_" # NAME : RISCVSLoad;
     def "int_riscv_" # NAME # "_mask" : RISCVSLoadMask;
   }
+  multiclass RISCVILoad {
+    def "int_riscv_" # NAME : RISCVILoad;
+    def "int_riscv_" # NAME # "_mask" : RISCVILoadMask;
+  }
   multiclass RISCVUSStore {
     def "int_riscv_" # NAME : RISCVUSStore;
     def "int_riscv_" # NAME # "_mask" : RISCVUSStoreMask;
@@ -245,7 +280,10 @@
     def "int_riscv_" # NAME : RISCVSStore;
     def "int_riscv_" # NAME # "_mask" : RISCVSStoreMask;
   }
-
+  multiclass RISCVIStore {
+    def "int_riscv_" # NAME : RISCVIStore;
+    def "int_riscv_" # NAME # "_mask" : RISCVIStoreMask;
+  }
   multiclass RISCVBinaryAAX {
     def "int_riscv_" # NAME : RISCVBinaryAAXNoMask;
     def "int_riscv_" # NAME # "_mask" : RISCVBinaryAAXMask;
@@ -272,6 +310,9 @@
   defm vse : RISCVUSStore;
   defm vlse: RISCVSLoad;
   defm vsse: RISCVSStore;
+  defm vlxe: RISCVILoad;
+  defm vsxe: RISCVIStore;
+  defm vsuxe: RISCVIStore;

   defm vadd : RISCVBinaryAAX;
   defm vsub : RISCVBinaryAAX;
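
For reference, a minimal sketch of how the new intrinsics are meant to be called from IR, pieced together from the class definitions above and the vlxe tests below. The scalable element types are written out here even though the pasted test file elides them, the operand names (%base, %index, %val, %maskedoff, %mask, %vl) are purely illustrative, and the vsxe name mangling is assumed by analogy with the vlxe tests:

  ; Indexed load: (pointer, index, vl)
  %v = call <vscale x 1 x i8> @llvm.riscv.vlxe.nxv1i8.nxv1i32(
         <vscale x 1 x i8>* %base, <vscale x 1 x i32> %index, i32 %vl)

  ; Masked indexed load: (maskedoff, pointer, index, mask, vl)
  %mv = call <vscale x 1 x i8> @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32(
         <vscale x 1 x i8> %maskedoff, <vscale x 1 x i8>* %base,
         <vscale x 1 x i32> %index, <vscale x 1 x i1> %mask, i32 %vl)

  ; Indexed store (vsxe) and unordered indexed store (vsuxe): (vector_in, pointer, index, vl)
  call void @llvm.riscv.vsxe.nxv1i8.nxv1i32(
         <vscale x 1 x i8> %val, <vscale x 1 x i8>* %base,
         <vscale x 1 x i32> %index, i32 %vl)
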
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoVPseudos.td
@@ -343,6 +343,39 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoILoadNoMask<VReg RetClass, VReg IdxClass>:
+      Pseudo<(outs RetClass:$rd),
+             (ins GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoILoadMask<VReg RetClass, VReg IdxClass>:
+      Pseudo<(outs GetVRegNoV0<RetClass>.R:$rd),
+             (ins GetVRegNoV0<RetClass>.R:$merge,
+                  GPR:$rs1, IdxClass:$rs2,
+                  VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 1;
+  let mayStore = 0;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Constraints = "$rd = $merge";
+  let Uses = [VL, VTYPE];
+  let VLIndex = 5;
+  let SEWIndex = 6;
+  let MergeOpIndex = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoUSStoreNoMask<VReg StClass>:
       Pseudo<(outs),
              (ins StClass:$rd, GPR:$rs1, GPR:$vl, ixlenimm:$sew),[]>,
@@ -420,6 +453,35 @@
   let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
 }

+class VPseudoIStoreNoMask<VReg StClass, VReg IdxClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 3;
+  let SEWIndex = 4;
+  let HasDummyMask = 1;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
+class VPseudoIStoreMask<VReg StClass, VReg IdxClass>:
+      Pseudo<(outs),
+             (ins StClass:$rd, GPR:$rs1, IdxClass:$rs2, VMaskOp:$vm, GPR:$vl, ixlenimm:$sew),[]>,
+      RISCVVPseudo {
+  let mayLoad = 0;
+  let mayStore = 1;
+  let hasSideEffects = 0;
+  let usesCustomInserter = 1;
+  let Uses = [VL, VTYPE];
+  let VLIndex = 4;
+  let SEWIndex = 5;
+  let BaseInstr = !cast<Instruction>(PseudoToVInst<NAME>.VInst);
+}
+
 class VPseudoBinaryMask

+multiclass VPseudoILoad {
+  foreach lmul = MxList.m in
+  foreach idx_lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar Vreg = lmul.vrclass;
+    defvar IdxLInfo = idx_lmul.MX;
+    defvar IdxVreg = idx_lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # IdxLInfo # "_" # LInfo : VPseudoILoadNoMask<Vreg, IdxVreg>;
+      def "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoILoadMask<Vreg, IdxVreg>;
+    }
+  }
+}
+
 multiclass VPseudoUSStore {
   foreach lmul = MxList.m in {
     defvar LInfo = lmul.MX;
@@ -510,6 +586,20 @@
   }
 }

+multiclass VPseudoIStore {
+  foreach lmul = MxList.m in
+  foreach idx_lmul = MxList.m in {
+    defvar LInfo = lmul.MX;
+    defvar Vreg = lmul.vrclass;
+    defvar IdxLInfo = idx_lmul.MX;
+    defvar IdxVreg = idx_lmul.vrclass;
+    let VLMul = lmul.value in {
+      def "_V_" # IdxLInfo # "_" # LInfo : VPseudoIStoreNoMask<Vreg, IdxVreg>;
+      def "_V_" # IdxLInfo # "_" # LInfo # "_MASK" : VPseudoIStoreMask<Vreg, IdxVreg>;
+    }
+  }
+}
+
 multiclass VPseudoBinary; }
+multiclass VPatILoad<string intrinsic,
+                     string inst,
+                     ValueType type,
+                     ValueType idx_type,
+                     ValueType mask_type,
+                     int sew,
+                     LMULInfo vlmul,
+                     LMULInfo idx_vlmul,
+                     VReg reg_class,
+                     VReg idx_reg_class>
+{
+  defvar Intr = !cast<Intrinsic>(intrinsic);
+  defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
+  def : Pat<(type (Intr GPR:$rs1, (idx_type idx_reg_class:$rs2), GPR:$vl)),
+            (Pseudo $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+
+  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
+  def : Pat<(type (IntrMask (type GetVRegNoV0<reg_class>.R:$merge),
+                            GPR:$rs1, (idx_type idx_reg_class:$rs2),
+                            (mask_type V0), GPR:$vl)),
+            (PseudoMask $merge,
+                        $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatUSStore; }
+multiclass VPatIStore<string intrinsic,
+                      string inst,
+                      ValueType type,
+                      ValueType idx_type,
+                      ValueType mask_type,
+                      int sew,
+                      LMULInfo vlmul,
+                      LMULInfo idx_vlmul,
+                      VReg reg_class,
+                      VReg idx_reg_class>
+{
+  defvar Intr = !cast<Intrinsic>(intrinsic);
+  defvar Pseudo = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX);
+  def : Pat<(Intr (type reg_class:$rs3), GPR:$rs1,
+                  (idx_type idx_reg_class:$rs2), GPR:$vl),
+            (Pseudo $rs3, $rs1, $rs2, (NoX0 GPR:$vl), sew)>;
+  defvar IntrMask = !cast<Intrinsic>(intrinsic # "_mask");
+  defvar PseudoMask = !cast<Instruction>(inst#"_V_"#idx_vlmul.MX#"_"#vlmul.MX#"_MASK");
+  def : Pat<(IntrMask (type reg_class:$rs3), GPR:$rs1,
+                      (idx_type idx_reg_class:$rs2), (mask_type V0), GPR:$vl),
+            (PseudoMask $rs3, $rs1, $rs2, (mask_type V0), (NoX0 GPR:$vl), sew)>;
+}
+
 multiclass VPatBinary; }
+//===----------------------------------------------------------------------===//
+// 7.6 Vector Indexed Instructions
+//===----------------------------------------------------------------------===//
+
+foreach vti = AllVectors in
+foreach eew = EEWList in {
+  defvar vlmul = vti.LMul;
+  defvar octuple_lmul = !cond(!eq(vti.LMul.MX, "MF8") : 1,
+                              !eq(vti.LMul.MX, "MF4") : 2,
+                              !eq(vti.LMul.MX, "MF2") : 4,
+                              !eq(vti.LMul.MX, "M1") : 8,
+                              !eq(vti.LMul.MX, "M2") : 16,
+                              !eq(vti.LMul.MX, "M4") : 32,
+                              !eq(vti.LMul.MX, "M8") : 64);
+  defvar log_sew = shift_amount<vti.SEW>.val;
+  // The data vector register group has EEW=SEW and EMUL=LMUL, while the offset
+  // vector register group has its EEW encoded in the instruction and
+  // EMUL=(EEW/SEW)*LMUL.
+  // Calculate the octuple elmul, which is (eew * octuple_lmul) >> log_sew.
+  defvar octuple_elmul = !srl(!mul(eew, octuple_lmul), log_sew);
+  // A legal octuple elmul must be greater than 0 and less than or equal to 64.
+  if !gt(octuple_elmul, 0) then {
+    if !le(octuple_elmul, 64) then {
+      defvar log_elmul = shift_amount<octuple_elmul>.val;
+      // 0, 1, 2 -> V_MF8 ~ V_MF2
+      // 3, 4, 5, 6 -> V_M1 ~ V_M8
+      defvar elmul_str = !if(!eq(log_elmul, 0), "MF8",
+                         !if(!eq(log_elmul, 1), "MF4",
+                         !if(!eq(log_elmul, 2), "MF2",
+                         "M" # !cast<string>(!shl(1, !add(log_elmul, -3))))));
+      defvar elmul = !cast<LMULInfo>("V_" # elmul_str);
+      defvar idx_vti = !cast<VTypeInfo>("VI" # eew # elmul_str);
+
+      defm : VPatILoad<"int_riscv_vlxe",
+                       "PseudoVLXEI"#eew,
+                       vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+                       vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+      defm : VPatIStore<"int_riscv_vsxe",
+                        "PseudoVSXEI"#eew,
+                        vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+                        vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+      defm : VPatIStore<"int_riscv_vsuxe",
+                        "PseudoVSUXEI"#eew,
+                        vti.Vector, idx_vti.Vector, vti.Mask, vti.SEW,
+                        vlmul, elmul, vti.RegClass, idx_vti.RegClass>;
+    }
+  }
+}
+
 //===----------------------------------------------------------------------===//
 // 12. Vector Integer Arithmetic Instructions
 //===----------------------------------------------------------------------===//
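
A worked instance of the EMUL selection above (an illustrative sketch, not part of the patch): for nxv4i8 data the SEW/LMUL pair is e8,mf2, so octuple_lmul is 4; indexing with 32-bit offsets gives octuple_elmul = (32 * 4) >> log2(8) = 16, i.e. the offset register group uses EMUL=2, and the pattern instantiated by the naming scheme above is PseudoVLXEI32_V_M2_MF2. Combinations whose octuple_elmul falls outside (0, 64] are simply not instantiated, e.g. e8,m8 data indexed by 64-bit offsets would need EMUL=64.

  ; e8,mf2 data indexed by i32 offsets; the index register group has EMUL = (32/8) * 1/2 = 2.
  %v = call <vscale x 4 x i8> @llvm.riscv.vlxe.nxv4i8.nxv4i32(
         <vscale x 4 x i8>* %base, <vscale x 4 x i32> %index, i32 %vl)
  ; Expected lowering, as checked in vlxe-rv32.ll below:
  ;   vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
  ;   vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
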
diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv32.ll @@ -0,0 +1,3281 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vlxe.nxv1i8.nxv1i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4)
+ + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = 
call @llvm.riscv.vlxe.nxv1i32.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define 
@intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i32( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i32( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i8.nxv1i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i16( + *, + , + i32); + +define 
@intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i8.nxv32i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i16.nxv32i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, 
i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: 
vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32f16.nxv32i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) 
+ + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i16( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i32 %2) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i16( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i8.nxv1i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a 
+} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i8.nxv32i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv64i8.nxv64i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv64i8.nxv64i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i16.nxv32i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i8( + *, + , + i32); + +define 
@intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; 
CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32f16.nxv32i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare 
@llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i8( + *, + , + i32); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i32 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i8( + * %0, + %1, + i32 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( + , + *, + , + , + i32); + +define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret %a +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vlxe-rv64.ll @@ -0,0 +1,5361 @@ +; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f,+d -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare @llvm.riscv.vlxe.nxv1i8.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64( + , + *, + , + 
, + i64); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i64 
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i64( + %0, + * 
%1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i64.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i64.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i64.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i64.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64(* %0, %1, i64 %2) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f64.nxv1i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + 
+define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f64.nxv2i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f64.nxv4i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f64.nxv8i64( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i64( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i8.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vlxe.nxv1i16.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call 
@llvm.riscv.vlxe.nxv8i32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i64.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i64.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i64.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + 
+define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i64.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call 
@llvm.riscv.vlxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f64.nxv1i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f64.nxv2i32( + *, + , + i64); + 
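The masked variants follow the RISCVILoadMask operand order (maskedoff, pointer, index, mask, vl), with a `<vscale x N x i1>` mask. The nxv1f64/nxv1i32 masked test above presumably expands as below; again a reconstruction of the stripped types, assuming the types named in the mangling:

; Reconstruction (assumed types taken from the .nxv1f64.nxv1i32 mangling):
declare <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
  <vscale x 1 x double>,
  <vscale x 1 x double>*,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x double> @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32(<vscale x 1 x double> %0, <vscale x 1 x double>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
entry:
; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i32
; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu
; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
  %a = call <vscale x 1 x double> @llvm.riscv.vlxe.mask.nxv1f64.nxv1i32(
    <vscale x 1 x double> %0,
    <vscale x 1 x double>* %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x double> %a
}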
+define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f64.nxv4i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f64.nxv8i32( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i32( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i8.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i8.nxv32i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( 
+ , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i16.nxv32i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, 
(a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i64.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare 
@llvm.riscv.vlxe.nxv2i64.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i64.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i64.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32f16.nxv32i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} 
+ %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + 
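Note that the vsetvli operands in the CHECK lines track the data type rather than the index type (e32,m4 for an nxv8f32 result here), while the index element width shows up only in the mnemonic (vlxei16.v for an nxv8i16 index). With the stripped types restored from the mangling, the masked declaration just above presumably reads:

; Reconstruction (assumed types taken from the .nxv8f32.nxv8i16 mangling):
declare <vscale x 8 x float> @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16(
  <vscale x 8 x float>,
  <vscale x 8 x float>*,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i64);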
+define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f64.nxv1i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f64.nxv2i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f64.nxv4i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f64.nxv8i16( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i16( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i8.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i8.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i8.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i8.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i8.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i8.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 
%4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i8.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i8.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i8.nxv16i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i8.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i8.nxv32i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i8.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv64i8.nxv64i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv64i8.nxv64i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i16.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vlxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i16.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i16.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i16.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i16.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i16.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i16.nxv16i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i16.nxv16i8( + * %0, + %1, 
+ i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32i16.nxv32i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32i16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i32.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i32.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i32.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i32.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i32.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16i32.nxv16i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16i32.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1i64.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1i64.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2i64.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2i64.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = 
call @llvm.riscv.vlxe.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4i64.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4i64.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8i64.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8i64.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f16.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f16.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f16.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f16.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f16.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8(* %0, 
%1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f16.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f16.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f16.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f16.nxv16i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f16.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv32f16.nxv32i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv32f16.nxv32i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f32.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, 
(a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f32.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f32.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f32.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f32.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f32.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f32.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f32.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv16f32.nxv16i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv16f32.nxv16i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( + , + *, + , + , + i64); + +define 
@intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv1f64.nxv1i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv1f64.nxv1i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv2f64.nxv2i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv2f64_nxv2f64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv2f64.nxv2i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv2f64_nxv2f64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv4f64.nxv4i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv4f64_nxv4f64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv4f64.nxv4i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv4f64_nxv4f64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + %a = call @llvm.riscv.vlxe.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret %a +} + +declare @llvm.riscv.vlxe.nxv8f64.nxv8i8( + *, + , + i64); + +define @intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8(* %0, %1, i64 %2) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_v_nxv8f64_nxv8f64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + %a = call @llvm.riscv.vlxe.nxv8f64.nxv8i8( + * %0, + %1, + i64 %2) + + ret %a +} + +declare @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8( + , + *, + , + , + i64); + +define @intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vlxe_mask_v_nxv8f64_nxv8f64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu 
+; CHECK: vlxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  %a = call <vscale x 8 x double> @llvm.riscv.vlxe.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret <vscale x 8 x double> %a
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv32.ll
@@ -0,0 +1,3445 @@
+; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \
+; RUN:   --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i32> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i32>,
+  <vscale x 1 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i32> %2,
+    <vscale x 1 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i32>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i32> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i32>,
+  <vscale x 2 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i32> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i32>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i32> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i32>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i32> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i32>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i32> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu
+; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32(
+    <vscale x 8 x i8> %0,
+    <vscale x 8 x i8>* %1,
+    <vscale x 8 x i32> %2,
+    i32 %3)
+
+ ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( + %0, + 
* %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void 
@llvm.riscv.vsuxe.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32 +; 
CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, 
i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( + , + *, + , + i32); + +define void 
@intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.nxv2f16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + 
%3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void 
@llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void 
@llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void 
@llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void 
@llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, 
v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8(
+    <vscale x 2 x float> %0,
+    <vscale x 2 x float>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i8>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i8> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+  <vscale x 4 x float>,
+  <vscale x 4 x float>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8(<vscale x 4 x float> %0, <vscale x 4 x float>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8(
+    <vscale x 4 x float> %0,
+    <vscale x 4 x float>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i8>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i8> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+  <vscale x 8 x float>,
+  <vscale x 8 x float>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8(<vscale x 8 x float> %0, <vscale x 8 x float>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8(
+    <vscale x 8 x float> %0,
+    <vscale x 8 x float>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i32 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i8>,
+  i32);
+
+define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, i32 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i8> %2,
+    i32 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsuxe-rv64.ll
@@ -0,0 +1,5629 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsuxe.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void 
+} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i64( + %0, + * %1, + 
%2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void 
@llvm.riscv.vsuxe.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: 
vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e64,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, 
e8,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, 
i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( + , + *, + , + i64); + +define void 
@intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.nxv1f32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret 
void +} + +declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i32( + 
%0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + 
%2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void 
@llvm.riscv.vsuxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define void 
@intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( + , + *, 
+ , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret 
void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1i64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1i64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + 
ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2i64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2i64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4i64.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4i64.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8i64.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8i64.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + 
+declare void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + 
+declare void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + 
ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv16f32.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv16f32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv1f64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv1f64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv2f64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv2f64_nxv2f64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv2f64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + 
+ ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv2f64_nxv2f64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv2f64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv4f64.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv4f64_nxv4f64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv4f64.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv4f64_nxv4f64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv4f64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsuxe.nxv8f64.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_v_nxv8f64_nxv8f64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsuxe.nxv8f64.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsuxe_mask_v_nxv8f64_nxv8f64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsuxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsuxe.mask.nxv8f64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll new file mode 100644 --- /dev/null +++ b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv32.ll @@ -0,0 +1,3445 @@ +; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+f,+experimental-zfh,+f -verify-machineinstrs \ +; RUN: --riscv-no-aliases < %s | FileCheck %s +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32( + , + *, + , + i32); + +define void 
@intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * 
%1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, 
i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, 
i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * 
%1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32( 
%0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16( 
%0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i16.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { 
+entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i32 
%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i32 %3) 
nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: 
vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void 
@llvm.riscv.vsxe.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i8( + 
%0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i8( + %0, + * %1, + %2, 
+ i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8( + , + *, + , + , + i32); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i32 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i32 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8( + , + *, + , + i32); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i32 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i8( + %0, + * %1, + %2, + i32 %3) + + ret void +} + +declare void 
@llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+  <vscale x 16 x float>,
+  <vscale x 16 x float>*,
+  <vscale x 16 x i8>,
+  <vscale x 16 x i1>,
+  i32);
+
+define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8(<vscale x 16 x float> %0, <vscale x 16 x float>* %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8(
+    <vscale x 16 x float> %0,
+    <vscale x 16 x float>* %1,
+    <vscale x 16 x i8> %2,
+    <vscale x 16 x i1> %3,
+    i32 %4)
+
+  ret void
+}
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/rvv/vsxe-rv64.ll
@@ -0,0 +1,5629 @@
+; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d,+experimental-zfh,+f -verify-machineinstrs \
+; RUN: --riscv-no-aliases < %s | FileCheck %s
+declare void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsxe.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
+  <vscale x 1 x i8>,
+  <vscale x 1 x i8>*,
+  <vscale x 1 x i64>,
+  <vscale x 1 x i1>,
+  i64);
+
+define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64(<vscale x 1 x i8> %0, <vscale x 1 x i8>* %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i64(
+    <vscale x 1 x i8> %0,
+    <vscale x 1 x i8>* %1,
+    <vscale x 1 x i64> %2,
+    <vscale x 1 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsxe.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
+  <vscale x 2 x i8>,
+  <vscale x 2 x i8>*,
+  <vscale x 2 x i64>,
+  <vscale x 2 x i1>,
+  i64);
+
+define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64(<vscale x 2 x i8> %0, <vscale x 2 x i8>* %1, <vscale x 2 x i64> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i64(
+    <vscale x 2 x i8> %0,
+    <vscale x 2 x i8>* %1,
+    <vscale x 2 x i64> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsxe.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
+  <vscale x 4 x i8>,
+  <vscale x 4 x i8>*,
+  <vscale x 4 x i64>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64(<vscale x 4 x i8> %0, <vscale x 4 x i8>* %1, <vscale x 4 x i64> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i64
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu
+; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i64(
+    <vscale x 4 x i8> %0,
+    <vscale x 4 x i8>* %1,
+    <vscale x 4 x i64> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8i8.nxv8i64(
+  <vscale x 8 x i8>,
+  <vscale x 8 x i8>*,
+  <vscale x 8 x i64>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64(<vscale x 8 x i8> %0, <vscale x 8 x i8>* %1, <vscale x 8 x i64> %2, i64
%3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; 
CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f64.nxv1i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f64.nxv1i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f64.nxv2i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f64.nxv2i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f64.nxv4i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f64.nxv4i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f64.nxv8i64( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: 
intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f64.nxv8i64( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i64 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei64.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i64( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i8.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: 
vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, 
{{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f64.nxv1i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f64.nxv1i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f64.nxv2i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f64.nxv2i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f64.nxv4i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli 
{{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f64.nxv4i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f64.nxv8i32( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f64.nxv8i32( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i32 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei32.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i32( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: 
vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i8.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i8.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i8.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, 
(a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), 
{{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: 
vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v 
{{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32f16.nxv32i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: 
vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32f16.nxv32i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: 
vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f64.nxv1i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f64.nxv1i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f64.nxv2i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f64.nxv2i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f64.nxv4i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; 
CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f64.nxv4i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f64.nxv8i16( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f64.nxv8i16( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i16 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei16.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i16( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i8.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i8.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i8_nxv1i8_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i8.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i8.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i8.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i8_nxv2i8_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i8.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i8.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void 
@llvm.riscv.vsxe.nxv4i8.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i8_nxv4i8_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i8.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i8.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i8.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i8_nxv8i8_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i8.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i8.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i8.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i8_nxv16i8_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i8.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i8.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i8.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i8_nxv32i8_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i8.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv64i8.nxv64i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv64i8.nxv64i8( + %0, + * %1, + %2, + i64 %3) + 
+ ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv64i8_nxv64i8_nxv64i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e8,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv64i8.nxv64i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i16_nxv1i16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i16_nxv2i16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i16_nxv4i16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsxe.mask.nxv8i16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i16_nxv8i16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i16_nxv16i16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32i16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32i16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32i16_nxv32i16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32i16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i32_nxv1i32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void 
@llvm.riscv.vsxe.mask.nxv2i32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i32_nxv2i32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i32_nxv4i32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i32_nxv8i32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16i32.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16i32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16i32_nxv16i32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16i32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1i64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1i64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8( 
+ , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1i64_nxv1i64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1i64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2i64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2i64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2i64_nxv2i64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2i64.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4i64.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4i64.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4i64_nxv4i64_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4i64.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8i64.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8i64.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8i64_nxv8i64_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8i64.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f16.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f16.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8( + , + *, + , + , + i64); + +define void 
@intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f16_nxv1f16_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f16.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f16.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f16.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f16_nxv2f16_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f16.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f16.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f16.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f16_nxv4f16_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f16.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f16.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f16.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f16_nxv8f16_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f16.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f16.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f16.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8( + , + *, + , + , + i64); + +define void 
@intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f16_nxv16f16_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f16.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv32f16.nxv32i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv32f16.nxv32i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv32f16_nxv32f16_nxv32i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e16,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv32f16.nxv32i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f32.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f32.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f32_nxv1f32_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,mf2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f32.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f32.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f32.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f32_nxv2f32_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv2f32.nxv2i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv4f32.nxv4i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv4f32.nxv4i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8( + , + *, + , + , + i64); + +define void 
@intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f32_nxv4f32_nxv4i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv4f32.nxv4i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv8f32.nxv8i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv8f32.nxv8i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f32_nxv8f32_nxv8i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m4,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv8f32.nxv8i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv16f32.nxv16i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv16f32.nxv16i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv16f32_nxv16f32_nxv16i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e32,m8,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv16f32.nxv16i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv1f64.nxv1i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv1f64.nxv1i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8( + , + *, + , + , + i64); + +define void @intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8( %0, * %1, %2, %3, i64 %4) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv1f64_nxv1f64_nxv1i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m1,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t + call void @llvm.riscv.vsxe.mask.nxv1f64.nxv1i8( + %0, + * %1, + %2, + %3, + i64 %4) + + ret void +} + +declare void @llvm.riscv.vsxe.nxv2f64.nxv2i8( + , + *, + , + i64); + +define void @intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8( %0, * %1, %2, i64 %3) nounwind { +entry: +; CHECK-LABEL: intrinsic_vsxe_v_nxv2f64_nxv2f64_nxv2i8 +; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu +; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}} + call void @llvm.riscv.vsxe.nxv2f64.nxv2i8( + %0, + * %1, + %2, + i64 %3) + + ret void +} + +declare void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8( + , + *, + , + , + i64); + +define void 
@intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8(<vscale x 2 x double> %0, <vscale x 2 x double>* %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv2f64_nxv2f64_nxv2i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m2,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv2f64.nxv2i8(
+    <vscale x 2 x double> %0,
+    <vscale x 2 x double>* %1,
+    <vscale x 2 x i8> %2,
+    <vscale x 2 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsxe.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
+  <vscale x 4 x double>,
+  <vscale x 4 x double>*,
+  <vscale x 4 x i8>,
+  <vscale x 4 x i1>,
+  i64);
+
+define void @intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8(<vscale x 4 x double> %0, <vscale x 4 x double>* %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv4f64_nxv4f64_nxv4i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m4,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv4f64.nxv4i8(
+    <vscale x 4 x double> %0,
+    <vscale x 4 x double>* %1,
+    <vscale x 4 x i8> %2,
+    <vscale x 4 x i1> %3,
+    i64 %4)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  i64);
+
+define void @intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}
+  call void @llvm.riscv.vsxe.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    i64 %3)
+
+  ret void
+}
+
+declare void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
+  <vscale x 8 x double>,
+  <vscale x 8 x double>*,
+  <vscale x 8 x i8>,
+  <vscale x 8 x i1>,
+  i64);
+
+define void @intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8(<vscale x 8 x double> %0, <vscale x 8 x double>* %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
+entry:
+; CHECK-LABEL: intrinsic_vsxe_mask_v_nxv8f64_nxv8f64_nxv8i8
+; CHECK: vsetvli {{.*}}, {{a[0-9]+}}, e64,m8,ta,mu
+; CHECK: vsxei8.v {{v[0-9]+}}, (a0), {{v[0-9]+}}, v0.t
+  call void @llvm.riscv.vsxe.mask.nxv8f64.nxv8i8(
+    <vscale x 8 x double> %0,
+    <vscale x 8 x double>* %1,
+    <vscale x 8 x i8> %2,
+    <vscale x 8 x i1> %3,
+    i64 %4)
+
+  ret void
+}